| entry_point (string, 1–65) | original_triton_python_code (string, 208–619k) | optimised_triton_code (string, 1.15k–275k) | repo_name (string, 7–115) | module_name (string, 1–65) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (list, 1–6) | stars (int64, 0–19.8k) | sha (string, 40) | repo_link (string, 72–180) |
|---|---|---|---|---|---|---|---|---|---|---|
GELU
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return F.relu(x, inplace=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr1 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return arg0_1,
class GELUNew(nn.Module):
def __init__(self):
super(GELUNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
akulaarora/pre-training
|
GELU
| false
| 14,779
|
[
"Apache-2.0"
] | 107
|
312ae1ec1ec279da557543184fc064dade76dbbd
|
https://github.com/akulaarora/pre-training/tree/312ae1ec1ec279da557543184fc064dade76dbbd
|
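Despite its name, the GELU module above forwards to `F.relu(x, inplace=True)`, which is why Inductor emits `triton_poi_fused_relu_0` and passes `arg0_1` as both the input and the output pointer. A minimal hand-written sketch of the same in-place ReLU pattern (names are illustrative, not from the dataset; requires a CUDA device):

```python
import torch
import triton
import triton.language as tl

@triton.jit
def relu_inplace_kernel(ptr, n_elements, BLOCK: tl.constexpr):
    # Each program handles one contiguous block of elements.
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    x = tl.load(ptr + offs, mask=mask)
    # Reading and writing the same pointer mirrors inplace=True above.
    tl.store(ptr + offs, tl.maximum(x, 0.0), mask=mask)

if torch.cuda.is_available():
    x = torch.randn(256, device='cuda')
    relu_inplace_kernel[(triton.cdiv(x.numel(), 128),)](x, x.numel(), BLOCK=128)
    assert (x >= 0).all()
```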
BCEWithLogitsLoss
|
import torch
import torch as th
import torch.nn as nn
class BCEWithLogitsLoss(nn.Module):
def __init__(self, weight=None):
super().__init__()
self.loss = th.nn.BCEWithLogitsLoss(weight=weight)
def forward(self, x, target):
return self.loss(x, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch as th
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCEWithLogitsLossNew(nn.Module):
def __init__(self, weight=None):
super().__init__()
self.loss = th.nn.BCEWithLogitsLoss(weight=weight)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
albanie/collaborative-experts
|
BCEWithLogitsLoss
| false
| 14,780
|
[
"Apache-2.0"
] | 237
|
b41defc4fb8de451809014c970ccbe518621909f
|
https://github.com/albanie/collaborative-experts/tree/b41defc4fb8de451809014c970ccbe518621909f
|
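The single reduction kernel above evaluates binary cross-entropy with logits in the numerically stable form `(1 - y) * x - (min(0, x) - log1p(exp(-|x|)))`, then divides the sum by the 256 elements. A quick eager-mode cross-check of that identity (values are illustrative):

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)   # logits
y = torch.rand(4, 4, 4, 4)    # targets
# min(0, x) - log1p(exp(-|x|)) is a stable evaluation of log(sigmoid(x)).
stable = ((1 - y) * x - (torch.clamp(x, max=0) - torch.log1p(torch.exp(-x.abs())))).mean()
assert torch.allclose(stable, F.binary_cross_entropy_with_logits(x, y), atol=1e-6)
```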
ConcatReLU
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def concat_relu(x):
"""Concatenated ReLU (http://arxiv.org/abs/1603.05201)."""
return F.relu(torch.cat([x, -x], dim=1))
class ConcatReLU(nn.Module):
"""Concatenated ReLU (http://arxiv.org/abs/1603.05201)."""
def forward(self, input):
return concat_relu(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = -tmp9
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp6, tmp10, tmp11)
tmp13 = tl.where(tmp4, tmp5, tmp12)
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_relu_0[grid(512)](arg0_1, buf0, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def concat_relu(x):
"""Concatenated ReLU (http://arxiv.org/abs/1603.05201)."""
return F.relu(torch.cat([x, -x], dim=1))
class ConcatReLUNew(nn.Module):
"""Concatenated ReLU (http://arxiv.org/abs/1603.05201)."""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
alisiahkoohi/survae_flows
|
ConcatReLU
| false
| 14,781
|
[
"MIT"
] | 262
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
https://github.com/alisiahkoohi/survae_flows/tree/e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
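The fused kernel realizes `torch.cat([x, -x], dim=1)` followed by ReLU in one pass by branching on the output channel index `x1`: channels 0–3 load `x` directly, channels 4–7 load and negate it. Since `relu(x) - relu(-x) == x`, concatenated ReLU keeps all the information in its input; a short eager check:

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
out = F.relu(torch.cat([x, -x], dim=1))   # shape (4, 8, 4, 4)
# The positive part lives in channels 0-3, the negative part in 4-7.
assert torch.equal(out[:, :4] - out[:, 4:], x)
```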
down_shifted_conv2d
|
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
def down_shift(x, pad=None):
xs = [int(y) for y in x.size()]
x = x[:, :, :xs[2] - 1, :]
pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
return pad(x)
class down_shifted_conv2d(nn.Module):
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3),
stride=(1, 1), shift_output_down=False, norm='weight_norm'):
super(down_shifted_conv2d, self).__init__()
assert norm in [None, 'batch_norm', 'weight_norm']
self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size,
stride)
self.shift_output_down = shift_output_down
self.norm = norm
self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2), int((
filter_size[1] - 1) / 2), filter_size[0] - 1, 0))
if norm == 'weight_norm':
self.conv = wn(self.conv)
elif norm == 'batch_norm':
self.bn = nn.BatchNorm2d(num_filters_out)
if shift_output_down:
self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0,
0, 1, 0)))
def forward(self, x, mask=None):
assert mask is None
x = self.pad(x)
x = self.conv(x)
x = self.bn(x) if self.norm == 'batch_norm' else x
return self.down_shift(x) if self.shift_output_down else x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_filters_in': 4, 'num_filters_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 5
x0 = xindex % 6
x2 = xindex // 30
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -1 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp2 & tmp4
tmp8 = tmp7 & tmp6
tmp9 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 24 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 24 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 2, 3), (24, 6, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(480)](primals_1, buf0, 480,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 2, 3), (24, 6, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3,
primals_2, buf3, 4, 24, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
return buf5, buf3, primals_2, primals_3, buf0, buf2, buf3
def down_shift(x, pad=None):
xs = [int(y) for y in x.size()]
x = x[:, :, :xs[2] - 1, :]
pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
return pad(x)
class down_shifted_conv2dNew(nn.Module):
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3),
stride=(1, 1), shift_output_down=False, norm='weight_norm'):
super(down_shifted_conv2dNew, self).__init__()
assert norm in [None, 'batch_norm', 'weight_norm']
self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size,
stride)
self.shift_output_down = shift_output_down
self.norm = norm
self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2), int((
filter_size[1] - 1) / 2), filter_size[0] - 1, 0))
if norm == 'weight_norm':
self.conv = wn(self.conv)
elif norm == 'batch_norm':
self.bn = nn.BatchNorm2d(num_filters_out)
if shift_output_down:
self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0,
0, 1, 0)))
def forward(self, input_0):
primals_4 = self.conv.bias
primals_2 = self.conv.weight_g
primals_3 = self.conv.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
ajayjain/lmconv
|
down_shifted_conv2d
| false
| 14,782
|
[
"MIT"
] | 69
|
e00576de5118702c90493e88c6e459b0e45d1290
|
https://github.com/ajayjain/lmconv/tree/e00576de5118702c90493e88c6e459b0e45d1290
|
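Here Inductor splits the module into three stages: a zero-pad kernel, a per-filter weight-norm kernel, and an extern convolution followed by a bias-add kernel. The middle kernel reconstructs `w = g * v / ||v||` with one L2 norm per output channel (24 elements per 4x2x3 filter). A small eager sketch of that decomposition, checked against `torch._weight_norm` (the primitive `nn.utils.weight_norm` calls under the hood):

```python
import torch

v = torch.randn(4, 4, 2, 3)                       # weight_v (direction)
g = torch.randn(4, 1, 1, 1)                       # weight_g (magnitude)
norm = v.flatten(1).norm(dim=1).view(4, 1, 1, 1)  # one L2 norm per filter
w = g / norm * v                                  # what the Triton kernel stores
assert torch.allclose(w, torch._weight_norm(v, g, 0), atol=1e-6)
```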
ConcatELU
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def concat_elu(x):
"""Like concatenated ReLU (http://arxiv.org/abs/1603.05201), but with ELU instead."""
return F.elu(torch.cat([x, -x], dim=1))
class ConcatELU(nn.Module):
"""Like concatenated ReLU (http://arxiv.org/abs/1603.05201), but with ELU instead."""
def forward(self, input):
return concat_elu(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = -tmp9
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp6, tmp10, tmp11)
tmp13 = tl.where(tmp4, tmp5, tmp12)
tmp14 = 0.0
tmp15 = tmp13 > tmp14
tmp16 = 1.0
tmp17 = tmp13 * tmp16
tmp18 = libdevice.expm1(tmp17)
tmp19 = tmp18 * tmp16
tmp20 = tl.where(tmp15, tmp17, tmp19)
tl.store(out_ptr0 + x3, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_elu_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def concat_elu(x):
"""Like concatenated ReLU (http://arxiv.org/abs/1603.05201), but with ELU instead."""
return F.elu(torch.cat([x, -x], dim=1))
class ConcatELUNew(nn.Module):
"""Like concatenated ReLU (http://arxiv.org/abs/1603.05201), but with ELU instead."""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
alisiahkoohi/survae_flows
|
ConcatELU
| false
| 14,783
|
[
"MIT"
] | 262
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
https://github.com/alisiahkoohi/survae_flows/tree/e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
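The only difference from the ConcatReLU row is the tail of the kernel: ELU with alpha = 1 is computed branch-free as `where(x > 0, x, expm1(x))`, with `expm1` keeping precision for inputs near zero. Eager equivalence check:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1024)
assert torch.allclose(torch.where(x > 0, x, torch.expm1(x)), F.elu(x))
```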
LandmarkHead
|
import torch
import torch.nn as nn
from itertools import product as product
class LandmarkHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(LandmarkHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=
(1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 10)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (20, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 20, 64, 64), (81920, 1, 1280, 20))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 20), (81920, 1280, 20,
1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 8192, 10), (81920, 10, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(327680)](buf3, primals_2, 327680,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class LandmarkHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(LandmarkHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=
(1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ai18435136351/facenet-retinaface-pytorch
|
LandmarkHead
| false
| 14,784
|
[
"MIT"
] | 48
|
f228969e46d7402170b708798a210de552879d16
|
https://github.com/ai18435136351/facenet-retinaface-pytorch/tree/f228969e46d7402170b708798a210de552879d16
|
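The first kernel converts the input to channels-last so the extern 1x1 convolution also produces a channels-last buffer; the original `permute(0, 2, 3, 1).contiguous().view(N, -1, 10)` then collapses into two zero-copy `reinterpret_tensor` calls, leaving only the bias add as real work. A shape-and-stride illustration of why no copy is needed:

```python
import torch

out = torch.randn(4, 20, 64, 64).to(memory_format=torch.channels_last)
nhwc = out.permute(0, 2, 3, 1)   # strides already match NHWC memory order
assert nhwc.is_contiguous()
flat = nhwc.view(4, -1, 10)      # a view, not a copy
assert flat.shape == (4, 8192, 10)
```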
AutoregressiveShift
|
import torch
import torch.nn as nn
class AutoregressiveShift(nn.Module):
"""Shifts input right to make model autoregressive."""
def __init__(self, embed_dim):
super(AutoregressiveShift, self).__init__()
self.embed_dim = embed_dim
self.first_token = nn.Parameter(torch.Tensor(1, 1, embed_dim))
self._reset_parameters()
def _reset_parameters(self):
nn.init.xavier_uniform_(self.first_token)
def forward(self, x):
first_token = self.first_token.expand(1, x.shape[1], self.embed_dim)
return torch.cat([first_token, x[:-1]], dim=0)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x0 = xindex % 4
x3 = xindex % 16
x4 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (x3 + 16 * (-1 + x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](primals_1, primals_2, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
return buf0,
class AutoregressiveShiftNew(nn.Module):
"""Shifts input right to make model autoregressive."""
def __init__(self, embed_dim):
super(AutoregressiveShiftNew, self).__init__()
self.embed_dim = embed_dim
self.first_token = nn.Parameter(torch.Tensor(1, 1, embed_dim))
self._reset_parameters()
def _reset_parameters(self):
nn.init.xavier_uniform_(self.first_token)
def forward(self, input_0):
primals_1 = self.first_token
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
alisiahkoohi/survae_flows
|
AutoregressiveShift
| false
| 14,785
|
[
"MIT"
] | 262
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
https://github.com/alisiahkoohi/survae_flows/tree/e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
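The right shift is realized as a single cat-style kernel: for output time step 0 (`x2 < 1`) it reads the learned `first_token`, broadcast across the batch, and for steps t >= 1 it reads `x[t-1]` via the `16 * (-1 + x2)` offset. Eager restatement of the indexing:

```python
import torch

x = torch.randn(4, 4, 4)                       # (time, batch, embed)
first = torch.randn(1, 1, 4).expand(1, 4, 4)   # broadcast over the batch
shifted = torch.cat([first, x[:-1]], dim=0)
assert torch.equal(shifted[0], first[0]) and torch.equal(shifted[1:], x[:-1])
```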
EPELoss
|
import torch
import torch.nn as nn
class EPELoss(nn.Module):
def __init__(self):
super(EPELoss, self).__init__()
def forward(self, output, target):
lossvalue = torch.norm(output - target + 1e-16, p=2, dim=1).mean()
return lossvalue
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp6 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp18 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = 1e-16
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 64.0
tmp29 = tmp27 / tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_linalg_vector_norm_mean_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class EPELossNew(nn.Module):
def __init__(self):
super(EPELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
aishmittal/DocProj
|
EPELoss
| false
| 14,786
|
[
"MIT"
] | 246
|
761e27927ab7a83f48e347921dc023d45a9d394f
|
https://github.com/aishmittal/DocProj/tree/761e27927ab7a83f48e347921dc023d45a9d394f
|
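Because dim=1 has a fixed size of 4, the kernel unrolls the channel reduction: four strided loads at offsets 0, 16, 32 and 48 within each 64-element sample feed a sum of squares, one `sqrt` per pixel, and a final mean over the 64 surviving positions. Eager cross-check:

```python
import torch

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
d = a - b + 1e-16
manual = d.pow(2).sum(dim=1).sqrt().mean()     # the unrolled form in the kernel
assert torch.allclose(manual, torch.norm(d, p=2, dim=1).mean())
```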
RewardCriterion
|
import torch
import torch.nn as nn
from torch.autograd import *
import torch.nn
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward, reduction='mean'):
N, L = input.shape[:2]
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
input = to_contiguous(input).view(-1)
reward = to_contiguous(reward).view(-1)
mask = (seq > 0).float()
mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1),
mask[:, :-1]], 1)).view(-1)
output = -input * reward * mask
if reduction == 'none':
output = output.view(N, L).sum(1) / mask.view(N, L).sum(1)
elif reduction == 'mean':
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = r0 % 4
tmp12 = tl.full([1, 1], 0, tl.int64)
tmp14 = tl.full([1, 1], 1, tl.int64)
tmp15 = tmp11 < tmp14
tmp16 = 1.0
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tmp11 >= tmp14
tl.full([1, 1], 4, tl.int64)
tmp22 = tl.load(in_ptr0 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4),
[XBLOCK, RBLOCK]), tmp19, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 > tmp12
tmp24 = tmp23.to(tl.float32)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp15, tmp18, tmp26)
tmp28 = tmp10 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp35 = tmp31 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class RewardCriterionNew(nn.Module):
def __init__(self):
super(RewardCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
aliabd/cos-cvae
|
RewardCriterion
| false
| 14,787
|
[
"Apache-2.0"
] | 53
|
d6f94dd0f1de6727e43da55d36a6433fbfd0c44b
|
https://github.com/aliabd/cos-cvae/tree/d6f94dd0f1de6727e43da55d36a6433fbfd0c44b
|
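Note the index plumbing before the gather load: `tl.where(idx < 0, idx + 4, idx)` reproduces PyTorch's negative-index wraparound for `gather`, and `tl.device_assert` enforces the 0 <= idx < 4 bound at run time. A one-line eager illustration of the wraparound:

```python
import torch

idx = torch.tensor([0, -1, 2, -3])
wrapped = torch.where(idx < 0, idx + 4, idx)   # the gathered dim has size 4
assert wrapped.tolist() == [0, 3, 2, 1]
```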
MLP
|
import torch
import torch.nn as nn
from torch.autograd import *
import torch.nn.parallel
import torch.utils.data
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu
=True):
super(MLP, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'mid_size': 4, 'out_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.autograd import *
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLPNew(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu
=True):
super(MLPNew, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, input_0):
primals_1 = self.fc.linear.weight
primals_2 = self.fc.linear.bias
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
alfred100p/VC-R-CNN
|
MLP
| false
| 14,788
|
[
"MIT"
] | 344
|
c887f5b6db6932fb5c828c8037e299ce5baadb9e
|
https://github.com/alfred100p/VC-R-CNN/tree/c887f5b6db6932fb5c828c8037e299ce5baadb9e
|
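The kernel name `relu_threshold_backward` hints at what the extra `torch.bool` buffer (`buf4`) is for: because the ReLU runs in place, Inductor saves the mask `out <= 0` during the forward pass so autograd can zero the corresponding gradients later. Eager restatement of that pairing:

```python
import torch

pre = torch.randn(64, 4)
act = pre.clamp(min=0)                 # forward ReLU
mask = act <= 0                        # saved for backward (buf4 here)
grad_out = torch.ones_like(act)
grad_in = grad_out.masked_fill(mask, 0.0)   # threshold_backward semantics
```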
Linear_dynamics
|
import torch
import torch.utils.data
from torch import nn
class Linear_dynamics(nn.Module):
def __init__(self, device='cpu'):
super(Linear_dynamics, self).__init__()
self.time = nn.Parameter(torch.ones(1) * 0.7)
self.device = device
self
def forward(self, x, v):
return x + v * self.time
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_3, primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class Linear_dynamicsNew(nn.Module):
def __init__(self, device='cpu'):
super(Linear_dynamicsNew, self).__init__()
self.time = nn.Parameter(torch.ones(1) * 0.7)
self.device = device
self
def forward(self, input_0, input_1):
primals_1 = self.time
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
alanpaivaa/egnn
|
Linear_dynamics
| false
| 14,789
|
[
"MIT"
] | 142
|
e9ca6c0c3e1d30a7598efbd66034121b4af8dccc
|
https://github.com/alanpaivaa/egnn/tree/e9ca6c0c3e1d30a7598efbd66034121b4af8dccc
|
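The learned scalar `time` arrives as a one-element tensor; the kernel loads it once (`tl.load(in_ptr2 + 0)`), broadcasts it across the block, and fuses `x + v * t` into a single elementwise pass. Eager equivalent:

```python
import torch

x, v = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
t = torch.ones(1) * 0.7                # scalar parameter, shape (1,)
out = x + v * t                        # t broadcasts over all 256 elements
assert torch.allclose(out, x + 0.7 * v)
```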
PositionalEncoding1d
|
import math
import torch
import torch.nn as nn
class PositionalEncoding1d(nn.Module):
"""
Learning positional embeddings.
Args:
size: int, the number of positions to encode.
embedding_dim: int, the size of each embedding vector.
"""
def __init__(self, size, embedding_dim):
super(PositionalEncoding1d, self).__init__()
self.size = size
self.embedding_dim = embedding_dim
self.encode_l = nn.Parameter(torch.Tensor(size, 1, embedding_dim))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.encode_l, std=0.125 / math.sqrt(self.
embedding_dim))
def forward(self, x):
return x + self.encode_l
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4, 'embedding_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class PositionalEncoding1dNew(nn.Module):
"""
Learning positional embeddings.
Args:
size: int, the number of positions to encode.
embedding_dim: int, the size of each embedding vector.
"""
def __init__(self, size, embedding_dim):
super(PositionalEncoding1dNew, self).__init__()
self.size = size
self.embedding_dim = embedding_dim
self.encode_l = nn.Parameter(torch.Tensor(size, 1, embedding_dim))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.encode_l, std=0.125 / math.sqrt(self.
embedding_dim))
def forward(self, input_0):
primals_1 = self.encode_l
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
alisiahkoohi/survae_flows
|
PositionalEncoding1d
| false
| 14,790
|
[
"MIT"
] | 262
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
https://github.com/alisiahkoohi/survae_flows/tree/e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
SilogLoss
|
import torch
import torch.nn as nn
class SilogLoss(nn.Module):
def __init__(self, ratio=10, ratio2=0.85):
super().__init__()
self.ratio = ratio
self.ratio2 = ratio2
def forward(self, pred, gt):
log_diff = torch.log(pred * self.ratio) - torch.log(gt * self.ratio)
silog1 = torch.mean(log_diff ** 2)
silog2 = self.ratio2 * log_diff.mean() ** 2
silog_loss = torch.sqrt(silog1 - silog2) * self.ratio
return silog_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_log_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = 10.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = tmp4 * tmp1
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.broadcast_to(tmp7, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp11 / tmp15
tmp17 = tmp14 / tmp15
tmp18 = tmp17 * tmp17
tmp19 = 0.85
tmp20 = tmp18 * tmp19
tmp21 = tmp16 - tmp20
tmp22 = libdevice.sqrt(tmp21)
tmp23 = tmp22 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_log_mean_mul_pow_sqrt_sub_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class SilogLossNew(nn.Module):
def __init__(self, ratio=10, ratio2=0.85):
super().__init__()
self.ratio = ratio
self.ratio2 = ratio2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
aliyun/dro-sfm
|
SilogLoss
| false
| 14,791
|
[
"MIT"
] | 147
|
8707e2e0ef799d7d47418a018060f503ef449fe3
|
https://github.com/aliyun/dro-sfm/tree/8707e2e0ef799d7d47418a018060f503ef449fe3
|
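Both reductions (the mean of d² and the mean of d, with d = log(10·pred) - log(10·gt)) are produced in one kernel pass before combining into sqrt(E[d²] - 0.85·E[d]²)·10. An eager restatement, useful as a reference when checking the fused version:

```python
import torch

pred, gt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
d = torch.log(pred * 10) - torch.log(gt * 10)
loss = torch.sqrt(d.pow(2).mean() - 0.85 * d.mean() ** 2) * 10
assert loss.ndim == 0                  # a single scalar, like buf2 above
```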
PositionalEncodingImage
|
import math
import torch
import torch.nn as nn
class PositionalEncodingImage(nn.Module):
"""
Learning positional embeddings for images.
Embeddings for channel, height and width are added to form the full positional embedding.
These encodings correspond to the ones from Sparse Transformers (https://arxiv.org/abs/1904.10509).
Args:
image_shape: Iterable, the shape of the image.
embedding_dim: int, the size of each embedding vector.
"""
def __init__(self, image_shape, embedding_dim):
super(PositionalEncodingImage, self).__init__()
assert len(image_shape
) == 3, 'image_shape should have length 3: (C,H,W)'
self.image_shape = image_shape
self.embedding_dim = embedding_dim
c, h, w = image_shape
self.encode_c = nn.Parameter(torch.Tensor(1, c, 1, 1, embedding_dim))
self.encode_h = nn.Parameter(torch.Tensor(1, 1, h, 1, embedding_dim))
self.encode_w = nn.Parameter(torch.Tensor(1, 1, 1, w, embedding_dim))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.encode_c, std=0.125 / math.sqrt(3 * self.
embedding_dim))
nn.init.normal_(self.encode_h, std=0.125 / math.sqrt(3 * self.
embedding_dim))
nn.init.normal_(self.encode_w, std=0.125 / math.sqrt(3 * self.
embedding_dim))
def forward(self, x):
return x + self.encode_c + self.encode_h + self.encode_w
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'image_shape': [4, 4, 4], 'embedding_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
x7 = xindex % 16
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr3 + x7, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x4, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1, 4), (16, 4, 4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 1, 4, 1, 4), (16, 16, 4, 4, 1))
assert_size_stride(primals_4, (1, 1, 1, 4, 4), (16, 16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, primals_3,
primals_4, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
del primals_3
del primals_4
return buf0,
class PositionalEncodingImageNew(nn.Module):
"""
Learning positional embeddings for images.
Embeddings for channel, height and width are added to form the full positional embedding.
These encodings correspond to the ones from Sparse Transformers (https://arxiv.org/abs/1904.10509).
Args:
image_shape: Iterable, the shape of the image.
embedding_dim: int, the size of each embedding vector.
"""
def __init__(self, image_shape, embedding_dim):
super(PositionalEncodingImageNew, self).__init__()
assert len(image_shape
) == 3, 'image_shape should have length 3: (C,H,W)'
self.image_shape = image_shape
self.embedding_dim = embedding_dim
c, h, w = image_shape
self.encode_c = nn.Parameter(torch.Tensor(1, c, 1, 1, embedding_dim))
self.encode_h = nn.Parameter(torch.Tensor(1, 1, h, 1, embedding_dim))
self.encode_w = nn.Parameter(torch.Tensor(1, 1, 1, w, embedding_dim))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.encode_c, std=0.125 / math.sqrt(3 * self.
embedding_dim))
nn.init.normal_(self.encode_h, std=0.125 / math.sqrt(3 * self.
embedding_dim))
nn.init.normal_(self.encode_w, std=0.125 / math.sqrt(3 * self.
embedding_dim))
def forward(self, input_0):
primals_1 = self.encode_c
primals_3 = self.encode_h
primals_4 = self.encode_w
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
alisiahkoohi/survae_flows
|
PositionalEncodingImage
| false
| 14,792
|
[
"MIT"
] | 262
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
https://github.com/alisiahkoohi/survae_flows/tree/e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
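All three broadcast additions fuse into one elementwise kernel; the `evict_last` hints on the three parameter loads reflect that each small embedding is reused across many of the 256 outputs. Note also the output shape: broadcasting the 4-D input against the 5-D parameters yields a 5-D result, matching the `(1, 4, 4, 4, 4)` buffer. Eager check:

```python
import torch

x = torch.rand(4, 4, 4, 4)
ec = torch.randn(1, 4, 1, 1, 4)        # channel embedding
eh = torch.randn(1, 1, 4, 1, 4)        # height embedding
ew = torch.randn(1, 1, 1, 4, 4)        # width embedding
out = x + ec + eh + ew
assert out.shape == (1, 4, 4, 4, 4)    # x is promoted to 5-D by broadcasting
```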
DumbFeat
|
import torch
import torch.nn as nn
import torch.optim
class DumbFeat(nn.Module):
def __init__(self, dropout):
super().__init__()
if dropout > 0.0:
self.dropout = torch.nn.Dropout(p=dropout, inplace=False)
else:
self.dropout = None
def forward(self, x):
if x.dim() > 2:
x = x.view(x.size(0), -1)
assert x.dim() == 2
if self.dropout is not None:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DumbFeatNew(nn.Module):
def __init__(self, dropout):
super().__init__()
if dropout > 0.0:
self.dropout = torch.nn.Dropout(p=dropout, inplace=False)
else:
self.dropout = None
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
alisure-fork/BF3S
|
DumbFeat
| false
| 14,793
|
[
"Apache-2.0"
] | 130
|
99cfb7ce4696f2585bb7c2502f234e60c55e8007
|
https://github.com/alisure-fork/BF3S/tree/99cfb7ce4696f2585bb7c2502f234e60c55e8007
|
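No dropout arithmetic survives into the kernel, which suggests the module was traced in eval mode, where `nn.Dropout` is the identity; all that remains is materializing the flattened `(4, 64)` view as a copy. A sketch of the eager behavior under that assumption:

```python
import torch

m = torch.nn.Dropout(p=0.5)
m.eval()                               # identity in eval mode
x = torch.rand(4, 4, 4, 4)
out = m(x.view(x.size(0), -1))
assert torch.equal(out, x.reshape(4, 64))
```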
BerHuLoss
|
import torch
import torch.nn as nn
class BerHuLoss(nn.Module):
"""Class implementing the BerHu loss."""
def __init__(self, threshold=0.2):
"""
Initializes the BerHuLoss class.
Parameters
----------
threshold : float
Mask parameter
"""
super().__init__()
self.threshold = threshold
def forward(self, pred, gt):
"""
Calculates the BerHu loss.
Parameters
----------
pred : torch.Tensor [B,1,H,W]
Predicted inverse depth map
gt : torch.Tensor [B,1,H,W]
Ground-truth inverse depth map
Returns
-------
loss : torch.Tensor [1]
BerHu loss
"""
huber_c = torch.max(pred - gt)
huber_c = self.threshold * huber_c
diff = (pred - gt).abs()
huber_mask = (diff > huber_c).detach()
diff2 = diff[huber_mask]
diff2 = diff2 ** 2
return torch.cat((diff, diff2)).mean()
def get_inputs():
return [torch.rand([4]), torch.rand([4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_gt_max_mul_sub_0(in_ptr0, in_ptr1, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tl_math.abs(tmp2)
tmp7 = 0.2
tmp8 = tmp5 * tmp7
tmp9 = tmp6 > tmp8
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp6, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4,), (1,))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.bool)
get_raw_stream(0)
triton_per_fused_abs_gt_max_mul_sub_0[grid(1)](arg0_1, arg1_1, buf0,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0, buf2
class BerHuLossNew(nn.Module):
"""Class implementing the BerHu loss."""
def __init__(self, threshold=0.2):
"""
Initializes the BerHuLoss class.
Parameters
----------
threshold : float
Mask parameter
"""
super().__init__()
self.threshold = threshold
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
aliyun/dro-sfm
|
BerHuLoss
| false
| 14,794
|
[
"MIT"
] | 147
|
8707e2e0ef799d7d47418a018060f503ef449fe3
|
https://github.com/aliyun/dro-sfm/tree/8707e2e0ef799d7d47418a018060f503ef449fe3
|
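Unlike the other rows, the compiled `call` here does not return the final scalar: the boolean indexing `diff[huber_mask]` is data-dependent, so the traced graph appears to stop after producing `|pred - gt|` (`buf0`) and the mask (`buf2`), and `BerHuLossNew.forward` returns the elementwise buffer. The remaining eager-mode steps of the original loss, for reference:

```python
import torch

pred, gt = torch.rand(4), torch.rand(4)
diff = (pred - gt).abs()
huber_c = 0.2 * torch.max(pred - gt)   # threshold from the max raw difference
diff2 = diff[diff > huber_c] ** 2      # data-dependent selection
loss = torch.cat((diff, diff2)).mean()
```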
MultinomialNLLLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
def _reduce(x, reduction='elementwise_mean'):
if reduction == 'none':
return x
elif reduction == 'elementwise_mean':
return x.mean()
elif reduction == 'sum':
return x.sum()
else:
raise ValueError('No such reduction {} defined'.format(reduction))
class MultinomialNLLLoss(nn.Module):
"""
Computes the negative log-likelihood of the multinomial distribution.
.. math::
\\ell(x, y) = L = - y \\cdot \\log(softmax(x))
Args:
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'elementwise_mean' | 'sum'. 'none': no reduction will be applied,
'elementwise_mean': the sum of the output will be divided by the number of
elements in the output, 'sum': the output will be summed. Default: 'elementwise_mean'
"""
def __init__(self, reduction='elementwise_mean'):
super(MultinomialNLLLoss, self).__init__()
self.reduction = reduction
def forward(self, input, target):
loss = -target * F.log_softmax(input, dim=1)
return _reduce(loss, reduction=self.reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp2 = tl.load(in_ptr1 + r3, None)
tmp3 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp4 = tl_math.exp(tmp3)
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tl_math.log(tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tmp1 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_mean_mul_neg_1[grid(1)](buf2, arg0_1,
buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
def _reduce(x, reduction='elementwise_mean'):
if reduction == 'none':
return x
elif reduction == 'elementwise_mean':
return x.mean()
elif reduction == 'sum':
return x.sum()
else:
raise ValueError('No such reduction {} defined'.format(reduction))
class MultinomialNLLLossNew(nn.Module):
"""
Computes the negative log-likelihood of the multinomial distribution.
.. math::
        \\ell(x, y) = L = - y \\cdot \\log(\\mathrm{softmax}(x))
Args:
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'elementwise_mean' | 'sum'. 'none': no reduction will be applied,
'elementwise_mean': the sum of the output will be divided by the number of
elements in the output, 'sum': the output will be summed. Default: 'elementwise_mean'
"""
def __init__(self, reduction='elementwise_mean'):
super(MultinomialNLLLossNew, self).__init__()
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
akshayka/gavel
|
MultinomialNLLLoss
| false
| 14,795
|
[
"MIT"
] | 67
|
40a22a725f2e70478483e98c9b07c6fc588e0c40
|
https://github.com/akshayka/gavel/tree/40a22a725f2e70478483e98c9b07c6fc588e0c40
|
GatedTanhUnit
|
import torch
import torch.nn as nn
def gated_tanh(x, dim):
"""Gated Tanh activation."""
x_tanh, x_sigmoid = torch.chunk(x, 2, dim=dim)
return torch.tanh(x_tanh) * torch.sigmoid(x_sigmoid)
class GatedTanhUnit(nn.Module):
"""Gated Tanh activation."""
def __init__(self, dim=-1):
super(GatedTanhUnit, self).__init__()
self.dim = dim
def forward(self, x):
return gated_tanh(x, dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
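# Editor's note: a hedged shape check, not part of the source record; chunking
# along dim=-1 halves the last axis, so a [4, 4, 4, 4] input yields [4, 4, 4, 2].
def _check_gated_tanh_shape():
    x, = get_inputs()
    assert GatedTanhUnit(dim=-1)(x).shape == (4, 4, 4, 2)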
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0[grid(128)](arg0_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def gated_tanh(x, dim):
"""Gated Tanh activation."""
x_tanh, x_sigmoid = torch.chunk(x, 2, dim=dim)
return torch.tanh(x_tanh) * torch.sigmoid(x_sigmoid)
class GatedTanhUnitNew(nn.Module):
"""Gated Tanh activation."""
def __init__(self, dim=-1):
super(GatedTanhUnitNew, self).__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
alisiahkoohi/survae_flows
|
GatedTanhUnit
| false
| 14,796
|
[
"MIT"
] | 262
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
https://github.com/alisiahkoohi/survae_flows/tree/e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
GatedConv2d
|
import torch
import torch.nn as nn
class GatedConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding):
super(GatedConv2d, self).__init__()
self.in_channels = in_channels
self.conv = nn.Conv2d(in_channels, out_channels * 3, kernel_size=
kernel_size, padding=padding)
def forward(self, x):
h = self.conv(x)
a, b, c = torch.chunk(h, chunks=3, dim=1)
return a + b * torch.sigmoid(c)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'padding': 4}]
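# Editor's note: a hedged shape check, not part of the source record. With
# kernel_size=4 and padding=4 each spatial side becomes 4 + 2*4 - 4 + 1 = 9,
# and chunking the 3*out_channels maps back down leaves out_channels outputs.
def _check_gated_conv_shape():
    x, = get_inputs()
    conv = GatedConv2d(in_channels=4, out_channels=4, kernel_size=4, padding=4)
    assert conv(x).shape == (4, 4, 9, 9)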
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 324
x1 = xindex // 324
x2 = xindex
tmp0 = tl.load(in_ptr0 + (648 + x0 + 972 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (x0 + 972 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (324 + x0 + 972 * x1), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tl.store(out_ptr0 + x2, tmp1, xmask)
tl.store(out_ptr1 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 12, 9, 9), (972, 81, 9, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(3888)](buf1, primals_2, 3888,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_1[grid(1296)](buf1, buf2, buf3,
1296, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_1, primals_3, reinterpret_tensor(buf1, (4, 4, 9, 9
), (972, 81, 9, 1), 324), buf2
class GatedConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding):
super(GatedConv2dNew, self).__init__()
self.in_channels = in_channels
self.conv = nn.Conv2d(in_channels, out_channels * 3, kernel_size=
kernel_size, padding=padding)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
alisiahkoohi/survae_flows
|
GatedConv2d
| false
| 14,797
|
[
"MIT"
] | 262
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
https://github.com/alisiahkoohi/survae_flows/tree/e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
Alignment
|
from _paritybench_helpers import _mock_config
from torch.nn import Module
import math
import torch
import torch.nn.functional as f
import torch.nn as nn
class Module(nn.Module):
def __init__(self):
super().__init__()
self.summary = {}
def add_summary(self, name, val):
if self.training:
self.summary[name] = val.clone().detach().cpu().numpy()
def get_summary(self, base_name=''):
summary = {}
if base_name:
base_name += '/'
if self.summary:
summary.update({(base_name + name): val for name, val in self.
summary.items()})
for name, child in self.named_children():
if hasattr(child, 'get_summary'):
name = base_name + name
summary.update(child.get_summary(name))
return summary
class Alignment(Module):
def __init__(self, args, __):
super().__init__()
self.temperature = nn.Parameter(torch.tensor(1 / math.sqrt(args.
hidden_size)))
def _attention(self, a, b):
return torch.matmul(a, b.transpose(1, 2)) * self.temperature
def forward(self, a, b, mask_a, mask_b):
attn = self._attention(a, b)
mask = torch.matmul(mask_a.float(), mask_b.transpose(1, 2).float())
        # Compare numeric version components; comparing the raw string tuple
        # misorders releases such as '1.10' (lexicographically '10' < '2').
        if tuple(int(v) for v in torch.__version__.split('.')[:2]) < (1, 2):
            mask = mask.byte()
        else:
            mask = mask.bool()
attn.masked_fill_(~mask, -10000000.0)
attn_a = f.softmax(attn, dim=1)
attn_b = f.softmax(attn, dim=2)
feature_b = torch.matmul(attn_a.transpose(1, 2), a)
feature_a = torch.matmul(attn_b, b)
self.add_summary('temperature', self.temperature)
self.add_summary('attention_a', attn_a)
self.add_summary('attention_b', attn_b)
return feature_a, feature_b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(hidden_size=4), '__': 4}]
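# Editor's note: a hedged smoke test, not part of the source record; it assumes
# the _mock_config helper imported above is available in this environment. The
# dim=1 softmax normalises attention for b-to-a alignment and dim=2 for a-to-b,
# so both returned features keep the input shapes.
def _check_alignment_shapes():
    a, b, mask_a, mask_b = get_inputs()
    _, init_kwargs = get_init_inputs()
    feature_a, feature_b = Alignment(**init_kwargs)(a, b, mask_a, mask_b)
    assert feature_a.shape == a.shape and feature_b.shape == b.shape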
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_bitwise_not_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_mul_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x4 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1)
tmp8 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1)
tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1)
tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp33 = tl.load(in_ptr0 + (x2 + 16 * x3), xmask).to(tl.int1)
tmp34 = tl.load(in_ptr1 + (x2 + 16 * x3), xmask)
tmp37 = tl.load(in_ptr0 + (4 + x2 + 16 * x3), xmask).to(tl.int1)
tmp38 = tl.load(in_ptr1 + (4 + x2 + 16 * x3), xmask)
tmp42 = tl.load(in_ptr0 + (8 + x2 + 16 * x3), xmask).to(tl.int1)
tmp43 = tl.load(in_ptr1 + (8 + x2 + 16 * x3), xmask)
tmp47 = tl.load(in_ptr0 + (12 + x2 + 16 * x3), xmask).to(tl.int1)
tmp48 = tl.load(in_ptr1 + (12 + x2 + 16 * x3), xmask)
tmp4 = tmp1 * tmp3
tmp5 = -10000000.0
tmp6 = tl.where(tmp0, tmp5, tmp4)
tmp9 = tmp8 * tmp3
tmp10 = tl.where(tmp7, tmp5, tmp9)
tmp11 = triton_helpers.maximum(tmp6, tmp10)
tmp14 = tmp13 * tmp3
tmp15 = tl.where(tmp12, tmp5, tmp14)
tmp16 = triton_helpers.maximum(tmp11, tmp15)
tmp19 = tmp18 * tmp3
tmp20 = tl.where(tmp17, tmp5, tmp19)
tmp21 = triton_helpers.maximum(tmp16, tmp20)
tmp22 = tmp6 - tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp10 - tmp21
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp15 - tmp21
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp20 - tmp21
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp5, tmp35)
tmp39 = tmp38 * tmp3
tmp40 = tl.where(tmp37, tmp5, tmp39)
tmp41 = triton_helpers.maximum(tmp36, tmp40)
tmp44 = tmp43 * tmp3
tmp45 = tl.where(tmp42, tmp5, tmp44)
tmp46 = triton_helpers.maximum(tmp41, tmp45)
tmp49 = tmp48 * tmp3
tmp50 = tl.where(tmp47, tmp5, tmp49)
tmp51 = triton_helpers.maximum(tmp46, tmp50)
tmp52 = tmp36 - tmp51
tmp53 = tl_math.exp(tmp52)
tmp54 = tmp40 - tmp51
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp57 = tmp45 - tmp51
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp60 = tmp50 - tmp51
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tl.store(out_ptr0 + x4, tmp21, xmask)
tl.store(out_ptr1 + x4, tmp32, xmask)
tl.store(out_ptr2 + x4, tmp51, xmask)
tl.store(out_ptr3 + x4, tmp62, xmask)
@triton.jit
def triton_poi_fused__softmax_clone_masked_fill_mul_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x3 = xindex // 64
x6 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x4 = xindex // 16
tmp0 = tl.load(in_ptr0 + x5, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x5, xmask)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr3 + (x6 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr4 + (x6 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr5 + (x0 + 4 * x4), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr6 + (x0 + 4 * x4), xmask, eviction_policy=
'evict_last')
tmp4 = tmp1 * tmp3
tmp5 = -10000000.0
tmp6 = tl.where(tmp0, tmp5, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tmp13 = tmp6 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp11, xmask)
tl.store(out_ptr1 + x5, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
buf2 = buf0
del buf0
triton_poi_fused_clone_0[grid(256)](primals_5, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__to_copy_bitwise_not_1[grid(256)](buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32)
triton_poi_fused__softmax_masked_fill_mul_2[grid(64)](buf4, buf1,
primals_3, buf5, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf9 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf11 = buf2
del buf2
triton_poi_fused__softmax_clone_masked_fill_mul_3[grid(256)](buf4,
buf1, primals_3, buf5, buf6, buf7, buf8, buf9, buf11, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del buf6
del buf7
del buf8
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0),
out=buf10)
buf12 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0)
del buf9
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0),
out=buf12)
del buf11
return reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, buf1, buf4, reinterpret_tensor(primals_1, (16, 4, 4),
(16, 1, 4), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 1, 4), 0
)
class Module(nn.Module):
def __init__(self):
super().__init__()
self.summary = {}
def add_summary(self, name, val):
if self.training:
self.summary[name] = val.clone().detach().cpu().numpy()
def get_summary(self, base_name=''):
summary = {}
if base_name:
base_name += '/'
if self.summary:
summary.update({(base_name + name): val for name, val in self.
summary.items()})
for name, child in self.named_children():
if hasattr(child, 'get_summary'):
name = base_name + name
summary.update(child.get_summary(name))
return summary
class AlignmentNew(Module):
def __init__(self, args, __):
super().__init__()
self.temperature = nn.Parameter(torch.tensor(1 / math.sqrt(args.
hidden_size)))
def _attention(self, a, b):
return torch.matmul(a, b.transpose(1, 2)) * self.temperature
def forward(self, input_0, input_1, input_2, input_3):
primals_3 = self.temperature
primals_1 = input_0
primals_2 = input_1
primals_4 = input_2
primals_5 = input_3
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
alibaba-edu/simple-effective-text-matching-pytorch
|
Alignment
| false
| 14,798
|
[
"Apache-2.0"
] | 278
|
05d572e30801b235e989c78c95dd24d5f5d35f74
|
https://github.com/alibaba-edu/simple-effective-text-matching-pytorch/tree/05d572e30801b235e989c78c95dd24d5f5d35f74
|
MegatronGelu
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class MegatronGelu(torch.nn.Module):
def forward(self, x):
return x * 0.5 * (torch.erf(x / 1.41421) + 1.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
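# Editor's note: a hedged cross-check, not part of the source record; 1/1.41421
# approximates 1/sqrt(2), so this should track the exact erf-based GELU in
# torch.nn.functional.gelu to well within the tolerance below.
def _check_megatron_gelu():
    x, = get_inputs()
    ref = torch.nn.functional.gelu(x)
    assert torch.allclose(MegatronGelu()(x), ref, atol=1e-4)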
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071085623775818
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MegatronGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
almiliMSFT/onnxruntime
|
MegatronGelu
| false
| 14,799
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
MegatronFastGelu
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class MegatronFastGelu(torch.nn.Module):
def forward(self, x):
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 +
0.044715 * x * x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
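# Editor's note: a hedged cross-check, not part of the source record; it needs
# torch >= 1.12 for the approximate='tanh' flag, which implements the same
# sqrt(2/pi) = 0.7978845608... tanh approximation written out above.
def _check_megatron_fast_gelu():
    x, = get_inputs()
    ref = torch.nn.functional.gelu(x, approximate='tanh')
    assert torch.allclose(MegatronFastGelu()(x), ref, atol=1e-6)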
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7978845608028654
tmp4 = tmp0 * tmp3
tmp5 = 0.044715
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp0
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp11 + tmp8
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MegatronFastGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
almiliMSFT/onnxruntime
|
MegatronFastGelu
| false
| 14,800
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
MyCustomFunctionReluModel
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class MyCustomFunctionReluModel(torch.nn.Module):
def __init__(self):
super().__init__()
class MyReLU(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
self.relu = MyReLU.apply
def forward(self, input):
return self.relu(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
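# Editor's note: a hedged gradient check, not part of the source record; the
# custom backward zeroes the gradient only where input < 0, so after a .sum()
# backward the leaf gradient equals the 0/1 ReLU subgradient mask.
def _check_custom_relu_grad():
    x = torch.randn(4, 4, requires_grad=True)
    MyCustomFunctionReluModel()(x).sum().backward()
    assert torch.equal(x.grad, (x >= 0).float())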
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MyCustomFunctionReluModelNew(torch.nn.Module):
def __init__(self):
super().__init__()
class MyReLU(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
self.relu = MyReLU.apply
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
almiliMSFT/onnxruntime
|
MyCustomFunctionReluModel
| false
| 14,801
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
LayerNorm
|
import torch
import torch.nn as nn
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class LayerNorm(nn.Module):
def __init__(self, hidden_size, epsilon, cast_fp16=True, formula=0):
super().__init__()
self.layer_norm = nn.LayerNorm(hidden_size, eps=epsilon)
self.layer_norm.bias.data.normal_(mean=0.0, std=0.1)
self.layer_norm.weight.data.normal_(mean=0.0, std=0.5)
self.cast_fp16 = cast_fp16
self.formula = formula
self.epsilon = epsilon
@staticmethod
def get_fused_op():
return 'LayerNormalization'
def my_layer_norm(self, x):
if self.formula == 0:
return self.layer_norm(x)
x = x.float()
u = x.mean(-1, keepdim=True)
y = x - u
s = y.pow(2).mean(-1, keepdim=True)
z = y / torch.sqrt(s + self.epsilon)
return self.layer_norm.weight.data * z + self.layer_norm.bias.data
def forward(self, x):
if self.cast_fp16 and x.dtype == torch.float16:
y = self.my_layer_norm(x.to(torch.float32))
else:
y = self.my_layer_norm(x)
return y,
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'epsilon': 4}]
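# Editor's note: a hedged equivalence check, not part of the source record;
# formula=1 re-derives LayerNorm by hand (mean, biased variance, epsilon inside
# the sqrt), so both code paths should agree once the weights are shared.
def _check_layer_norm_formulas():
    x, = get_inputs()
    ln0 = LayerNorm(hidden_size=4, epsilon=4, formula=0)
    ln1 = LayerNorm(hidden_size=4, epsilon=4, formula=1)
    ln1.load_state_dict(ln0.state_dict())
    assert torch.allclose(ln0(x)[0], ln1(x)[0], atol=1e-5)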
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = tmp20 + tmp7
tmp22 = libdevice.rsqrt(tmp21)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return buf2, primals_1
class LayerNormNew(nn.Module):
def __init__(self, hidden_size, epsilon, cast_fp16=True, formula=0):
super().__init__()
self.layer_norm = nn.LayerNorm(hidden_size, eps=epsilon)
self.layer_norm.bias.data.normal_(mean=0.0, std=0.1)
self.layer_norm.weight.data.normal_(mean=0.0, std=0.5)
self.cast_fp16 = cast_fp16
self.formula = formula
self.epsilon = epsilon
@staticmethod
def get_fused_op():
return 'LayerNormalization'
def my_layer_norm(self, x):
if self.formula == 0:
return self.layer_norm(x)
x = x.float()
u = x.mean(-1, keepdim=True)
y = x - u
s = y.pow(2).mean(-1, keepdim=True)
z = y / torch.sqrt(s + self.epsilon)
return self.layer_norm.weight.data * z + self.layer_norm.bias.data
def forward(self, input_0):
primals_2 = self.layer_norm.weight
primals_3 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
almiliMSFT/onnxruntime
|
LayerNorm
| false
| 14,802
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
DepthHead
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DepthHead(nn.Module):
def __init__(self, input_dim=256, hidden_dim=128, scale=False):
super(DepthHead, self).__init__()
self.scale = scale
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 1, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x_d, act_fn=F.tanh):
out = self.conv2(self.relu(self.conv1(x_d)))
return act_fn(out)
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {}]
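# Editor's note: a hedged shape check, not part of the source record; both 3x3
# convs use padding=1, so the 64x64 resolution is preserved and only channels
# change (256 -> 128 -> 1). F.tanh is deprecated in newer torch; torch.tanh is
# the drop-in replacement if the default act_fn warns.
def _check_depth_head_shape():
    x, = get_inputs()
    assert DepthHead()(x).shape == (4, 1, 64, 64)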
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_tanh_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_4, (1, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2097152)](buf1, primals_2,
2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_tanh_1[grid(16384)](buf3, primals_5,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf3
class DepthHeadNew(nn.Module):
def __init__(self, input_dim=256, hidden_dim=128, scale=False):
super(DepthHeadNew, self).__init__()
self.scale = scale
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 1, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
aliyun/dro-sfm
|
DepthHead
| false
| 14,803
|
[
"MIT"
] | 147
|
8707e2e0ef799d7d47418a018060f503ef449fe3
|
https://github.com/aliyun/dro-sfm/tree/8707e2e0ef799d7d47418a018060f503ef449fe3
|
FeatBlock
|
import torch
import torch.nn as nn
class FeatBlock(nn.Module):
def __init__(self, planes=128, out_dim=128):
super().__init__()
self.conv1 = nn.Conv2d(planes, planes, 3, padding=1)
self.conv2 = nn.Conv2d(planes, out_dim, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.relu(self.conv1(self.relu(x)))
x = self.conv2(x)
return x
def get_inputs():
return [torch.rand([4, 128, 64, 64])]
def get_init_inputs():
return [[], {}]
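# Editor's note: a hedged shape check, not part of the source record; the two
# padded 3x3 convs keep the 64x64 resolution and the 128-channel width.
def _check_feat_block_shape():
    x, = get_inputs()
    assert FeatBlock()(x).shape == (4, 128, 64, 64)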
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (y0 + 128 * x2 + 524288 * y1), tmp2, ymask)
tl.store(out_ptr1 + (x2 + 4096 * y3), tmp2, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 524288 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_2, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16384, 9)](primals_2, buf0, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_0[grid(16384, 9)](primals_4, buf1, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128),
torch.float32)
triton_poi_fused_relu_1[grid(512, 4096)](primals_1, buf2, primals_1,
512, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf3 = extern_kernels.convolution(buf2, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 64, 64), (524288, 1, 8192, 128))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_2[grid(2097152)](buf4, primals_3,
2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf5 = extern_kernels.convolution(buf4, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 64, 64), (524288, 1, 8192, 128))
buf6 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(512, 4096)](buf5, primals_5,
buf6, 512, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf5
del primals_5
return buf6, buf0, buf1, buf2, buf4
class FeatBlockNew(nn.Module):
def __init__(self, planes=128, out_dim=128):
super().__init__()
self.conv1 = nn.Conv2d(planes, planes, 3, padding=1)
self.conv2 = nn.Conv2d(planes, out_dim, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
aliyun/dro-sfm
|
FeatBlock
| false
| 14,804
|
[
"MIT"
] | 147
|
8707e2e0ef799d7d47418a018060f503ef449fe3
|
https://github.com/aliyun/dro-sfm/tree/8707e2e0ef799d7d47418a018060f503ef449fe3
|
ProjectionInputDepth
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ProjectionInputDepth(nn.Module):
def __init__(self, cost_dim, hidden_dim, out_chs):
super().__init__()
self.out_chs = out_chs
self.convc1 = nn.Conv2d(cost_dim, hidden_dim, 1, padding=0)
self.convc2 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convd1 = nn.Conv2d(1, hidden_dim, 7, padding=3)
self.convd2 = nn.Conv2d(hidden_dim, 64, 3, padding=1)
self.convd = nn.Conv2d(64 + hidden_dim, out_chs - 1, 3, padding=1)
def forward(self, depth, cost):
cor = F.relu(self.convc1(cost))
cor = F.relu(self.convc2(cor))
dfm = F.relu(self.convd1(depth))
dfm = F.relu(self.convd2(dfm))
cor_dfm = torch.cat([cor, dfm], dim=1)
out_d = F.relu(self.convd(cor_dfm))
return torch.cat([out_d, depth], dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64]), torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'cost_dim': 4, 'hidden_dim': 4, 'out_chs': 4}]
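# Editor's note: a hedged shape check, not part of the source record; convd
# emits out_chs - 1 channels and the raw 1-channel depth is concatenated back,
# so the output always carries out_chs channels at the input resolution.
def _check_projection_input_depth_shape():
    depth, cost = get_inputs()
    proj = ProjectionInputDepth(cost_dim=4, hidden_dim=4, out_chs=4)
    assert proj(depth, cost).shape == (4, 4, 64, 64)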
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 68
x0 = xindex % 4096
x2 = xindex // 278528
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 68, tl.int64)
tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-4 + x1) + 262144 * x2), tmp12,
other=0.0)
tmp16 = tl.load(in_ptr3 + (-4 + x1), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x3, tmp21, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 4
x0 = xindex % 4096
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp15 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_9, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_10, (64,), (1,))
assert_size_stride(primals_11, (3, 68, 3, 3), (612, 9, 3, 1))
assert_size_stride(primals_12, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(65536)](buf1, primals_2,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf3 = extern_kernels.convolution(primals_8, primals_6, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_0[grid(65536)](buf4, primals_7,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf6 = empty_strided_cuda((4, 68, 64, 64), (278528, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(1114112)](buf2, primals_5, buf5,
primals_10, buf6, 1114112, XBLOCK=1024, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf8 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(65536)](buf7, primals_12, primals_8,
buf8, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(49152)](
buf7, primals_12, buf9, 49152, XBLOCK=256, num_warps=4,
num_stages=1)
del buf7
del primals_12
buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(1048576)](
buf5, primals_10, buf10, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf5
del primals_10
buf11 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(65536)](
buf2, primals_5, buf11, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf2
del primals_5
return (buf8, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_9, primals_11, buf1, buf4, buf6, buf9, buf10, buf11)
class ProjectionInputDepthNew(nn.Module):
def __init__(self, cost_dim, hidden_dim, out_chs):
super().__init__()
self.out_chs = out_chs
self.convc1 = nn.Conv2d(cost_dim, hidden_dim, 1, padding=0)
self.convc2 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convd1 = nn.Conv2d(1, hidden_dim, 7, padding=3)
self.convd2 = nn.Conv2d(hidden_dim, 64, 3, padding=1)
self.convd = nn.Conv2d(64 + hidden_dim, out_chs - 1, 3, padding=1)
def forward(self, input_0, input_1):
primals_1 = self.convc1.weight
primals_2 = self.convc1.bias
primals_4 = self.convc2.weight
primals_5 = self.convc2.bias
primals_6 = self.convd1.weight
primals_7 = self.convd1.bias
primals_9 = self.convd2.weight
primals_10 = self.convd2.bias
primals_11 = self.convd.weight
primals_12 = self.convd.bias
primals_8 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
aliyun/dro-sfm
|
ProjectionInputDepth
| false
| 14,805
|
[
"MIT"
] | 147
|
8707e2e0ef799d7d47418a018060f503ef449fe3
|
https://github.com/aliyun/dro-sfm/tree/8707e2e0ef799d7d47418a018060f503ef449fe3
|
NeuralNetNonDifferentiableOutput
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class NeuralNetNonDifferentiableOutput(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetNonDifferentiableOutput, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1):
out = self.fc1(input1)
out1 = self.relu(out)
out2 = self.fc2(out1)
mask1 = torch.gt(out1, 0.01)
mask1 = mask1.long()
mask2 = torch.lt(out2, 0.02)
mask2 = mask2.long()
return out1, mask1, out2, mask2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
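# Editor's note: a hedged dtype check, not part of the source record; the two
# thresholded masks are cast to int64, which makes them non-differentiable
# outputs alongside the differentiable out1 / out2.
def _check_nondifferentiable_outputs():
    x, = get_inputs()
    out1, mask1, out2, mask2 = NeuralNetNonDifferentiableOutput(4, 4, 4)(x)
    assert mask1.dtype == torch.long and mask2.dtype == torch.long
    assert out1.requires_grad and out2.requires_grad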
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy_gt_relu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.01
tmp6 = tmp4 > tmp5
tmp7 = tmp6.to(tl.int64)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_lt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.02
tmp2 = tmp0 < tmp1
tmp3 = tmp2.to(tl.int64)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_relu_0[grid(256)](buf1, primals_2,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
triton_poi_fused__to_copy_lt_1[grid(256)](buf2, buf4, 256, XBLOCK=
128, num_warps=4, num_stages=1)
return buf1, buf3, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, primals_4
class NeuralNetNonDifferentiableOutputNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetNonDifferentiableOutputNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2], output[3]
|
almiliMSFT/onnxruntime
|
NeuralNetNonDifferentiableOutput
| false
| 14,806
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
NeuralNetPartialNoGradModel
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class NeuralNetPartialNoGradModel(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetPartialNoGradModel, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size).requires_grad_(
False)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, model_input):
out = self.relu(self.fc1(model_input))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
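# Editor's note: a hedged freeze check, not part of the source record;
# requires_grad_(False) on fc1 freezes both its weight and bias while fc2
# remains trainable.
def _check_partial_no_grad():
    net = NeuralNetPartialNoGradModel(4, 4, 4)
    assert not net.fc1.weight.requires_grad and not net.fc1.bias.requires_grad
    assert net.fc2.weight.requires_grad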
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
class NeuralNetPartialNoGradModelNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetPartialNoGradModelNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size).requires_grad_(
False)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
almiliMSFT/onnxruntime
|
NeuralNetPartialNoGradModel
| false
| 14,807
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
PixelSort
|
import torch
from torch import nn
class PixelSort(nn.Module):
"""The inverse operation of PixelShuffle
Reduces the spatial resolution, increasing the number of channels.
    Currently, only scale 0.5 is supported.
Later, torch.nn.functional.pixel_sort may be implemented.
Reference:
http://pytorch.org/docs/0.3.0/_modules/torch/nn/modules/pixelshuffle.html#PixelShuffle
http://pytorch.org/docs/0.3.0/_modules/torch/nn/functional.html#pixel_shuffle
"""
def __init__(self, upscale_factor=0.5):
super(PixelSort, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, x):
b, c, h, w = x.size()
x = x.view(b, c, 2, 2, h // 2, w // 2)
x = x.permute(0, 1, 5, 3, 2, 4).contiguous()
x = x.view(b, 4 * c, h // 2, w // 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
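A shape-level sketch of the module above (illustrative only; it checks the documented 4x-channels/half-resolution trade-off, not bitwise agreement with nn.PixelShuffle):
sort = PixelSort()
y = sort(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 16, 2, 2)  # channels x4, each spatial dim halved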
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 32
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 2
x3 = xindex // 2 % 2
x4 = xindex // 4
y0 = yindex % 2
y1 = yindex // 2
x6 = xindex
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 4 * x4 + 8 * x3 + 16 * y1),
xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x6 + 8 * y5), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2, 2, 2), (64, 16, 8, 4, 2, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(32, 8)](arg0_1, buf0, 32, 8, XBLOCK=8,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0),
class PixelSortNew(nn.Module):
"""The inverse operation of PixelShuffle
Reduces the spatial resolution, increasing the number of channels.
    Currently, only scale 0.5 is supported.
Later, torch.nn.functional.pixel_sort may be implemented.
Reference:
http://pytorch.org/docs/0.3.0/_modules/torch/nn/modules/pixelshuffle.html#PixelShuffle
http://pytorch.org/docs/0.3.0/_modules/torch/nn/functional.html#pixel_shuffle
"""
def __init__(self, upscale_factor=0.5):
super(PixelSortNew, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
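The compiled variant above dispatches through call(), which asserts contiguous CUDA tensors of shape (4, 4, 4, 4). A hedged equivalence check (illustrative; requires a CUDA device and the Triton runtime):
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.equal(PixelSort()(x), PixelSortNew()(x))  # pure data movement, so exact equality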
|
alpayuz/DeepDeblur-PyTorch
|
PixelSort
| false
| 14,808
|
[
"MIT"
] | 158
|
771252e123e3a11da849bb9cef2a7cc49d8d1a2d
|
https://github.com/alpayuz/DeepDeblur-PyTorch/tree/771252e123e3a11da849bb9cef2a7cc49d8d1a2d
|
BertPooler
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
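A minimal sketch of the pooler above (illustrative; _mock_config stands in for a full BertConfig, and only hidden_size is consumed here). Note that only the first token of each sequence is pooled:
pooler = BertPooler(_mock_config(hidden_size=4))
pooled = pooler(torch.rand(4, 4, 4))  # (batch, seq_len, hidden)
assert pooled.shape == (4, 4)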
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2
class BertPoolerNew(nn.Module):
def __init__(self, config):
super(BertPoolerNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Aksh97/VGCN-BERT
|
BertPooler
| false
| 14,809
|
[
"MIT"
] | 106
|
62b5ae5a3c53f4bff555027d87a57d3a994a32bb
|
https://github.com/Aksh97/VGCN-BERT/tree/62b5ae5a3c53f4bff555027d87a57d3a994a32bb
|
enhance_net_nopool
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
class CSDN_Tem(nn.Module):
def __init__(self, in_ch, out_ch):
super(CSDN_Tem, self).__init__()
self.depth_conv = nn.Conv2d(in_channels=in_ch, out_channels=in_ch,
kernel_size=3, stride=1, padding=1, groups=in_ch)
self.point_conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch,
kernel_size=1, stride=1, padding=0, groups=1)
def forward(self, input):
out = self.depth_conv(input)
out = self.point_conv(out)
return out
class enhance_net_nopool(nn.Module):
def __init__(self, scale_factor):
super(enhance_net_nopool, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.scale_factor = scale_factor
self.upsample = nn.UpsamplingBilinear2d(scale_factor=self.scale_factor)
number_f = 32
self.e_conv1 = CSDN_Tem(3, number_f)
self.e_conv2 = CSDN_Tem(number_f, number_f)
self.e_conv3 = CSDN_Tem(number_f, number_f)
self.e_conv4 = CSDN_Tem(number_f, number_f)
self.e_conv5 = CSDN_Tem(number_f * 2, number_f)
self.e_conv6 = CSDN_Tem(number_f * 2, number_f)
self.e_conv7 = CSDN_Tem(number_f * 2, 3)
def enhance(self, x, x_r):
x = x + x_r * (torch.pow(x, 2) - x)
x = x + x_r * (torch.pow(x, 2) - x)
x = x + x_r * (torch.pow(x, 2) - x)
enhance_image_1 = x + x_r * (torch.pow(x, 2) - x)
x = enhance_image_1 + x_r * (torch.pow(enhance_image_1, 2) -
enhance_image_1)
x = x + x_r * (torch.pow(x, 2) - x)
x = x + x_r * (torch.pow(x, 2) - x)
enhance_image = x + x_r * (torch.pow(x, 2) - x)
return enhance_image
def forward(self, x):
if self.scale_factor == 1:
x_down = x
else:
x_down = F.interpolate(x, scale_factor=1 / self.scale_factor,
mode='bilinear')
x1 = self.relu(self.e_conv1(x_down))
x2 = self.relu(self.e_conv2(x1))
x3 = self.relu(self.e_conv3(x2))
x4 = self.relu(self.e_conv4(x3))
x5 = self.relu(self.e_conv5(torch.cat([x3, x4], 1)))
x6 = self.relu(self.e_conv6(torch.cat([x2, x5], 1)))
x_r = F.tanh(self.e_conv7(torch.cat([x1, x6], 1)))
if self.scale_factor == 1:
x_r = x_r
else:
x_r = self.upsample(x_r)
enhance_image = self.enhance(x, x_r)
return enhance_image, x_r
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'scale_factor': 1.0}]
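enhance() applies the quadratic curve x <- x + x_r * (x^2 - x) eight times with a shared curve map x_r. A minimal end-to-end sketch (illustrative; shapes follow get_inputs above):
net = enhance_net_nopool(scale_factor=1.0)
img = torch.rand(4, 3, 64, 64)
enhanced, x_r = net(img)
assert enhanced.shape == img.shape and x_r.shape == img.shape
# One curve step on a scalar: 0.25 + 0.5 * (0.25 ** 2 - 0.25) == 0.15625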
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-32 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_add_convolution_mul_pow_sub_tanh_5(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
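    # Fuses the final bias-add + tanh (the curve map x_r, written back to
    # in_out_ptr0) with all eight curve steps x <- x + x_r * (x * x - x)
    # from enhance(); the enhanced image is written to in_out_ptr1.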
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp5 = tmp4 * tmp4
tmp6 = tmp5 - tmp4
tmp7 = tmp3 * tmp6
tmp8 = tmp4 + tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp9 - tmp8
tmp11 = tmp3 * tmp10
tmp12 = tmp8 + tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp13 - tmp12
tmp15 = tmp3 * tmp14
tmp16 = tmp12 + tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp17 - tmp16
tmp19 = tmp3 * tmp18
tmp20 = tmp16 + tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp21 - tmp20
tmp23 = tmp3 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp25 - tmp24
tmp27 = tmp3 * tmp26
tmp28 = tmp24 + tmp27
tmp29 = tmp28 * tmp28
tmp30 = tmp29 - tmp28
tmp31 = tmp3 * tmp30
tmp32 = tmp28 + tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp33 - tmp32
tmp35 = tmp3 * tmp34
tmp36 = tmp32 + tmp35
tl.store(in_out_ptr0 + x3, tmp3, None)
tl.store(in_out_ptr1 + x3, tmp36, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (3,), (1,))
assert_size_stride(primals_4, (32, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_17, (32,), (1,))
assert_size_stride(primals_18, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_21, (32,), (1,))
assert_size_stride(primals_22, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_25, (32,), (1,))
assert_size_stride(primals_26, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_27, (64,), (1,))
assert_size_stride(primals_28, (3, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_29, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(524288)](buf5, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_1[grid(524288)](buf7, primals_9,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_2[grid(524288)](buf9, primals_11,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_1[grid(524288)](buf11, primals_13,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf12, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_2[grid(524288)](buf13, primals_15,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf15 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_3[grid(1048576)](buf11, buf14, primals_17,
buf15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_4[grid(1048576)](buf17, primals_19,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf19 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_3[grid(1048576)](buf7, buf18, primals_21,
buf19, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_4[grid(1048576)](buf21, primals_23,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf23 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_3[grid(1048576)](buf3, buf22, primals_25,
buf23, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf24, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_4[grid(1048576)](buf25, primals_27,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf26 = extern_kernels.convolution(buf25, primals_28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf27 = buf26
del buf26
buf28 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
buf29 = buf28
del buf28
buf30 = buf29
del buf29
triton_poi_fused_add_convolution_mul_pow_sub_tanh_5[grid(49152)](buf27,
buf30, primals_29, primals_1, 49152, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_29
buf31 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(524288)](
buf22, primals_25, buf31, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf22
del primals_25
buf32 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(524288)](
buf18, primals_21, buf32, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_21
buf33 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(524288)](
buf14, primals_17, buf33, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf14
del primals_17
return (buf30, buf27, primals_1, primals_2, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, primals_20, primals_22, primals_24, primals_26,
primals_28, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15,
buf17, buf19, buf21, buf23, buf25, buf27, buf31, buf32, buf33)
class CSDN_Tem(nn.Module):
def __init__(self, in_ch, out_ch):
super(CSDN_Tem, self).__init__()
self.depth_conv = nn.Conv2d(in_channels=in_ch, out_channels=in_ch,
kernel_size=3, stride=1, padding=1, groups=in_ch)
self.point_conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch,
kernel_size=1, stride=1, padding=0, groups=1)
def forward(self, input):
out = self.depth_conv(input)
out = self.point_conv(out)
return out
class enhance_net_nopoolNew(nn.Module):
def __init__(self, scale_factor):
super(enhance_net_nopoolNew, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.scale_factor = scale_factor
self.upsample = nn.UpsamplingBilinear2d(scale_factor=self.scale_factor)
number_f = 32
self.e_conv1 = CSDN_Tem(3, number_f)
self.e_conv2 = CSDN_Tem(number_f, number_f)
self.e_conv3 = CSDN_Tem(number_f, number_f)
self.e_conv4 = CSDN_Tem(number_f, number_f)
self.e_conv5 = CSDN_Tem(number_f * 2, number_f)
self.e_conv6 = CSDN_Tem(number_f * 2, number_f)
self.e_conv7 = CSDN_Tem(number_f * 2, 3)
def enhance(self, x, x_r):
x = x + x_r * (torch.pow(x, 2) - x)
x = x + x_r * (torch.pow(x, 2) - x)
x = x + x_r * (torch.pow(x, 2) - x)
enhance_image_1 = x + x_r * (torch.pow(x, 2) - x)
x = enhance_image_1 + x_r * (torch.pow(enhance_image_1, 2) -
enhance_image_1)
x = x + x_r * (torch.pow(x, 2) - x)
x = x + x_r * (torch.pow(x, 2) - x)
enhance_image = x + x_r * (torch.pow(x, 2) - x)
return enhance_image
def forward(self, input_0):
primals_2 = self.e_conv1.depth_conv.weight
primals_3 = self.e_conv1.depth_conv.bias
primals_4 = self.e_conv1.point_conv.weight
primals_5 = self.e_conv1.point_conv.bias
primals_6 = self.e_conv2.depth_conv.weight
primals_7 = self.e_conv2.depth_conv.bias
primals_8 = self.e_conv2.point_conv.weight
primals_9 = self.e_conv2.point_conv.bias
primals_10 = self.e_conv3.depth_conv.weight
primals_11 = self.e_conv3.depth_conv.bias
primals_12 = self.e_conv3.point_conv.weight
primals_13 = self.e_conv3.point_conv.bias
primals_14 = self.e_conv4.depth_conv.weight
primals_15 = self.e_conv4.depth_conv.bias
primals_16 = self.e_conv4.point_conv.weight
primals_17 = self.e_conv4.point_conv.bias
primals_18 = self.e_conv5.depth_conv.weight
primals_19 = self.e_conv5.depth_conv.bias
primals_20 = self.e_conv5.point_conv.weight
primals_21 = self.e_conv5.point_conv.bias
primals_22 = self.e_conv6.depth_conv.weight
primals_23 = self.e_conv6.depth_conv.bias
primals_24 = self.e_conv6.point_conv.weight
primals_25 = self.e_conv6.point_conv.bias
primals_26 = self.e_conv7.depth_conv.weight
primals_27 = self.e_conv7.depth_conv.bias
primals_28 = self.e_conv7.point_conv.weight
primals_29 = self.e_conv7.point_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29])
return output[0], output[1]
|
alisonwqq/Zero-DCE_extension
|
enhance_net_nopool
| false
| 14,810
|
[
"MIT"
] | 97
|
6b59b36cbe2983e216789583d837bdc88d3e5cf8
|
https://github.com/alisonwqq/Zero-DCE_extension/tree/6b59b36cbe2983e216789583d837bdc88d3e5cf8
|
NeuralNetMultiplePositionalArguments
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class NeuralNetMultiplePositionalArguments(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArguments, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out = self.fc1(model_input)
out = self.relu(out)
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
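A minimal sketch (illustrative): the two positional inputs are summed before the fc1 -> ReLU -> fc2 stack, so both must share a broadcastable shape:
model = NeuralNetMultiplePositionalArguments(input_size=4, hidden_size=4, num_classes=4)
out = model(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)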
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_4, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(
buf2, (64, 4), (4, 1), 0), primals_5, buf4
class NeuralNetMultiplePositionalArgumentsNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
almiliMSFT/onnxruntime
|
NeuralNetMultiplePositionalArguments
| false
| 14,811
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
TransformerEncoderLayer
|
from torch.nn import Module
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Identity
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""
Obtained from: github.com:rwightman/pytorch-image-models
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    This is the same as the DropConnect impl I created for EfficientNet, etc. networks; however,
    the original name is misleading, as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mixing DropConnect as a layer name and using
    'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype,
        device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class Attention(Module):
"""
Obtained from timm: github.com:rwightman/pytorch-image-models
"""
def __init__(self, dim, num_heads=8, attention_dropout=0.1,
projection_dropout=0.1):
super().__init__()
self.num_heads = num_heads
head_dim = dim // self.num_heads
self.scale = head_dim ** -0.5
self.qkv = Linear(dim, dim * 3, bias=False)
self.attn_drop = Dropout(attention_dropout)
self.proj = Linear(dim, dim)
self.proj_drop = Dropout(projection_dropout)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class DropPath(nn.Module):
"""
Obtained from: github.com:rwightman/pytorch-image-models
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class TransformerEncoderLayer(Module):
"""
Inspired by torch.nn.TransformerEncoderLayer and timm.
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
attention_dropout=0.1, drop_path_rate=0.1):
super(TransformerEncoderLayer, self).__init__()
self.pre_norm = LayerNorm(d_model)
self.self_attn = Attention(dim=d_model, num_heads=nhead,
attention_dropout=attention_dropout, projection_dropout=dropout)
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout1 = Dropout(dropout)
self.norm1 = LayerNorm(d_model)
self.linear2 = Linear(dim_feedforward, d_model)
self.dropout2 = Dropout(dropout)
self.drop_path = DropPath(drop_path_rate
) if drop_path_rate > 0 else Identity()
self.activation = F.gelu
def forward(self, src: 'torch.Tensor', *args, **kwargs) ->torch.Tensor:
src = src + self.drop_path(self.self_attn(self.pre_norm(src)))
src = self.norm1(src)
src2 = self.linear2(self.dropout1(self.activation(self.linear1(src))))
src = src + self.drop_path(self.dropout2(src2))
return src
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
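A minimal sketch of the layer above (illustrative; eval() makes the pass deterministic by disabling Dropout and the stochastic DropPath):
layer = TransformerEncoderLayer(d_model=4, nhead=4)
layer.eval()
out = layer(torch.rand(4, 4, 4))  # (batch, seq_len, d_model)
assert out.shape == (4, 4, 4)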
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Identity
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
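    # Stage 1 of the pre-norm LayerNorm: per-row mean and rsqrt(var + eps)
    # over the last dimension (size 4); the normalize-and-affine stage runs
    # in triton_poi_fused_native_layer_norm_1 below.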
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, None)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (2048, 4), (4, 1))
assert_size_stride(primals_10, (2048,), (1,))
assert_size_stride(primals_11, (4, 2048), (2048, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 2048), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32
)
triton_poi_fused_gelu_10[grid(32768)](buf16, buf17, 32768, XBLOCK=
256, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_11, (2048, 4), (1, 2048), 0),
out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, buf15, primals_12, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0
), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""
Obtained from: github.com:rwightman/pytorch-image-models
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    This is the same as the DropConnect impl I created for EfficientNet, etc. networks; however,
    the original name is misleading, as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mixing DropConnect as a layer name and using
    'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype,
        device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class Attention(Module):
"""
Obtained from timm: github.com:rwightman/pytorch-image-models
"""
def __init__(self, dim, num_heads=8, attention_dropout=0.1,
projection_dropout=0.1):
super().__init__()
self.num_heads = num_heads
head_dim = dim // self.num_heads
self.scale = head_dim ** -0.5
self.qkv = Linear(dim, dim * 3, bias=False)
self.attn_drop = Dropout(attention_dropout)
self.proj = Linear(dim, dim)
self.proj_drop = Dropout(projection_dropout)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class DropPath(nn.Module):
"""
Obtained from: github.com:rwightman/pytorch-image-models
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class TransformerEncoderLayerNew(Module):
"""
Inspired by torch.nn.TransformerEncoderLayer and timm.
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
attention_dropout=0.1, drop_path_rate=0.1):
super(TransformerEncoderLayerNew, self).__init__()
self.pre_norm = LayerNorm(d_model)
self.self_attn = Attention(dim=d_model, num_heads=nhead,
attention_dropout=attention_dropout, projection_dropout=dropout)
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout1 = Dropout(dropout)
self.norm1 = LayerNorm(d_model)
self.linear2 = Linear(dim_feedforward, d_model)
self.dropout2 = Dropout(dropout)
self.drop_path = DropPath(drop_path_rate
) if drop_path_rate > 0 else Identity()
self.activation = F.gelu
def forward(self, input_0):
primals_1 = self.pre_norm.weight
primals_2 = self.pre_norm.bias
primals_4 = self.self_attn.qkv.weight
primals_5 = self.self_attn.proj.weight
primals_6 = self.self_attn.proj.bias
primals_9 = self.linear1.weight
primals_10 = self.linear1.bias
primals_7 = self.norm1.weight
primals_8 = self.norm1.bias
primals_11 = self.linear2.weight
primals_12 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
alihassanijr/Compact-Transformers
|
TransformerEncoderLayer
| false
| 14,812
|
[
"Apache-2.0"
] | 281
|
61b656eacdf113f92900f800410bb788bb7d9a3c
|
https://github.com/alihassanijr/Compact-Transformers/tree/61b656eacdf113f92900f800410bb788bb7d9a3c
|
TV_L1LOSS
|
import torch
import torch.nn as nn
import torch.utils.data
class TV_L1LOSS(nn.Module):
def __init__(self):
super(TV_L1LOSS, self).__init__()
def forward(self, x, y):
size = x.size()
        h_tv_diff = torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :] - (y[:, :,
            1:, :] - y[:, :, :-1, :])).sum()
        w_tv_diff = torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1] - (y[:, :,
            :, 1:] - y[:, :, :, :-1])).sum()
return (h_tv_diff + w_tv_diff) / size[0] / size[1] / size[2] / size[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
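Because the finite differences are linear, the loss above equals the anisotropic TV-L1 of the residual x - y, normalized by the element count. A small equivalence sketch (illustrative):
x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
e = x - y
tv = (e[:, :, 1:, :] - e[:, :, :-1, :]).abs().sum() + (e[:, :, :, 1:] -
    e[:, :, :, :-1]).abs().sum()
assert torch.allclose(TV_L1LOSS()(x, y), tv / e.numel())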
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
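    # Single-launch reduction over both TV terms: r0/r1 enumerate the 192
    # height differences, r2/r3 the 192 width differences; the four 0.25
    # factors implement the division by size[0]*size[1]*size[2]*size[3] = 256.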
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp3 = tl.load(in_ptr1 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (r0 + 16 * r1), rmask, other=0.0)
tmp12 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp13 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp15 = tl.load(in_ptr1 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp16 = tl.load(in_ptr1 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tmp2 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(rmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp14 = tmp12 - tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp14 - tmp17
tmp19 = tl_math.abs(tmp18)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(rmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp11 + tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class TV_L1LOSSNew(nn.Module):
def __init__(self):
super(TV_L1LOSSNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
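# Parity sketch (assumes a CUDA device, since `call` launches Triton kernels);
# the output should match the eager TV_L1LOSS up to floating-point error.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    print(TV_L1LOSSNew()(x, y))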
|
alsgkals2/SRResCGAN
|
TV_L1LOSS
| false
| 14,813
|
[
"MIT"
] | 81
|
a71201a93e1819045f9c7711743812546d3a1f31
|
https://github.com/alsgkals2/SRResCGAN/tree/a71201a93e1819045f9c7711743812546d3a1f31
|
L1GradLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class L1GradLoss(nn.Module):
def __init__(self, grad=False):
super(L1GradLoss, self).__init__()
self.grad = grad
def forward(self, input, target):
err = input - target
loss = err.norm(p=1).div(err.numel())
        if self.grad:
            # NOTE: the grad=True path depends on an external `utils` module
            # providing imGrad; it is not imported here and is unused by default.
            loss += utils.imGrad(err, bc='reflexive').norm(p=1).div(err.numel())
        return loss
def __repr__(self):
return self.__class__.__name__ + '(' + 'gradL1 = ' + str(self.grad
) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
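# Usage sketch (illustrative only; exercises the default grad=False path, which
# needs no external `utils` module):
if __name__ == '__main__':
    crit = L1GradLoss(grad=False)
    print(crit(*get_inputs()))  # mean absolute error: ||input - target||_1 / numel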
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 0.00390625
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L1GradLossNew(nn.Module):
def __init__(self, grad=False):
super(L1GradLossNew, self).__init__()
self.grad = grad
def __repr__(self):
return self.__class__.__name__ + '(' + 'gradL1 = ' + str(self.grad
) + ')'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
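# Parity sketch (assumes a CUDA device); the kernel folds 1/numel into the
# constant 0.00390625 = 1/256 for the fixed 4x4x4x4 input shape.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    print(L1GradLossNew()(x, y))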
|
alsgkals2/SRResCGAN
|
L1GradLoss
| false
| 14,814
|
[
"MIT"
] | 81
|
a71201a93e1819045f9c7711743812546d3a1f31
|
https://github.com/alsgkals2/SRResCGAN/tree/a71201a93e1819045f9c7711743812546d3a1f31
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(torch
.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.softmax1 = torch.nn.Softmax(dim=1)
self.softmax2 = torch.nn.Softmax(dim=1)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out2 = self.fc2(model_input)
out1 = self.softmax1(out1)
out2 = self.softmax2(out2)
out1 = self.relu1(out1)
out2 = self.relu2(out2)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
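# Usage sketch (illustrative): the two heads share only the summed input, so
# out1 and out2 have no dependency on each other.
if __name__ == '__main__':
    net = NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(4, 4, 4)
    out1, out2 = net(*get_inputs())
    print(out1.shape, out2.shape)  # both torch.Size([4, 4, 4, 4])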
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_threshold_backward_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf1, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__softmax_relu_threshold_backward_2[grid(256)](buf3,
buf5, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = buf3
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__softmax_relu_threshold_backward_2[grid(256)](buf4,
buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
return buf5, buf6, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), buf1, buf2, buf7, buf8
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew(
torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.softmax1 = torch.nn.Softmax(dim=1)
self.softmax2 = torch.nn.Softmax(dim=1)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
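# Parity sketch (assumes a CUDA device; parameters must live on the GPU for
# the Triton `call` path to consume them):
if __name__ == '__main__':
    net = NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew(4, 4, 4).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    out1, out2 = net(a, b)
    print(out1.shape, out2.shape)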
|
almiliMSFT/onnxruntime
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
| false
| 14,815
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
MSEGradLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class MSEGradLoss(nn.Module):
def __init__(self, grad=False):
super(MSEGradLoss, self).__init__()
self.grad = grad
def forward(self, input, target):
err = input - target
loss = err.norm(p=2).pow(2).div(err.numel())
        if self.grad:
            # NOTE: the grad=True path depends on an external `utils` module
            # providing imGrad; it is not imported here and is unused by default.
            loss += utils.imGrad(err, bc='reflexive').norm(p=2).pow(2).div(err.numel())
        return loss
def __repr__(self):
return self.__class__.__name__ + '(' + 'gradMSE = ' + str(self.grad
) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
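# Usage sketch (illustrative; default grad=False path only):
if __name__ == '__main__':
    crit = MSEGradLoss(grad=False)
    print(crit(*get_inputs()))  # mean squared error: ||input - target||_2^2 / numel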
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_linalg_vector_norm_pow_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tmp7 * tmp7
tmp9 = 0.00390625
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_pow_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSEGradLossNew(nn.Module):
def __init__(self, grad=False):
super(MSEGradLossNew, self).__init__()
self.grad = grad
def __repr__(self):
return self.__class__.__name__ + '(' + 'gradMSE = ' + str(self.grad
) + ')'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
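# Parity sketch (assumes a CUDA device); note that the kernel takes sqrt and
# then squares, reproducing norm(p=2).pow(2) literally rather than simplifying.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    print(MSEGradLossNew()(x, y))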
|
alsgkals2/SRResCGAN
|
MSEGradLoss
| false
| 14,816
|
[
"MIT"
] | 81
|
a71201a93e1819045f9c7711743812546d3a1f31
|
https://github.com/alsgkals2/SRResCGAN/tree/a71201a93e1819045f9c7711743812546d3a1f31
|
PoseHead
|
import torch
import torch.nn as nn
class PoseHead(nn.Module):
def __init__(self, input_dim=256, hidden_dim=128):
super(PoseHead, self).__init__()
self.conv1_pose = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2_pose = nn.Conv2d(hidden_dim, 6, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x_p):
out = self.conv2_pose(self.relu(self.conv1_pose(x_p))).mean(3).mean(2)
return torch.cat([out[:, :3], 0.01 * out[:, 3:]], dim=1)
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {}]
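# Usage sketch (illustrative): spatial means reduce the 6-channel map to one
# 6-vector per sample; the 0.01 factor rescales the last three components
# (presumably translation vs. rotation, though that split is an assumption here).
if __name__ == '__main__':
    head = PoseHead()
    print(head(torch.rand(4, 256, 64, 64)).shape)  # torch.Size([4, 6])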
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_per_fused_convolution_mean_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 1536
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x4 = xindex
x1 = xindex // 64 % 6
tmp0 = tl.load(in_ptr0 + (r3 + 64 * x4), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_per_fused_convolution_mean_2(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 24
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1), xmask)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x0 + 6 * x1), tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
tmp0 = tl.load(in_ptr0 + (3 + x0 + 6 * x1), xmask)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = 0.01
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x0 + 6 * x1), tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_4, (6, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_5, (6,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2097152)](buf1, primals_2,
2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 6, 64, 64), (24576, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 6, 64), (384, 64, 1), torch.float32)
triton_per_fused_convolution_mean_1[grid(1536)](buf2, primals_5,
buf3, 1536, 64, XBLOCK=8, num_warps=4, num_stages=1)
del buf2
del primals_5
buf4 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
triton_per_fused_convolution_mean_2[grid(24)](buf3, buf4, 24, 64,
XBLOCK=32, num_warps=8, num_stages=1)
del buf3
buf7 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
buf5 = reinterpret_tensor(buf7, (4, 3), (6, 1), 0)
triton_poi_fused_cat_3[grid(12)](buf4, buf5, 12, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf7, (4, 3), (6, 1), 3)
triton_poi_fused_mul_4[grid(12)](buf4, buf6, 12, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
return buf7, primals_1, primals_3, primals_4, buf1
class PoseHeadNew(nn.Module):
def __init__(self, input_dim=256, hidden_dim=128):
super(PoseHeadNew, self).__init__()
self.conv1_pose = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2_pose = nn.Conv2d(hidden_dim, 6, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1_pose.weight
primals_2 = self.conv1_pose.bias
primals_4 = self.conv2_pose.weight
primals_5 = self.conv2_pose.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
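# Parity sketch (assumes a CUDA device; module parameters moved to the GPU):
if __name__ == '__main__':
    head = PoseHeadNew().cuda()
    print(head(torch.rand(4, 256, 64, 64, device='cuda')).shape)  # torch.Size([4, 6])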
|
aliyun/dro-sfm
|
PoseHead
| false
| 14,817
|
[
"MIT"
] | 147
|
8707e2e0ef799d7d47418a018060f503ef449fe3
|
https://github.com/aliyun/dro-sfm/tree/8707e2e0ef799d7d47418a018060f503ef449fe3
|
ProjectionInputPose
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ProjectionInputPose(nn.Module):
def __init__(self, cost_dim, hidden_dim, out_chs):
super().__init__()
self.out_chs = out_chs
self.convc1 = nn.Conv2d(cost_dim, hidden_dim, 1, padding=0)
self.convc2 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convp1 = nn.Conv2d(6, hidden_dim, 7, padding=3)
self.convp2 = nn.Conv2d(hidden_dim, 64, 3, padding=1)
self.convp = nn.Conv2d(64 + hidden_dim, out_chs - 6, 3, padding=1)
def forward(self, pose, cost):
bs, _, h, w = cost.shape
cor = F.relu(self.convc1(cost))
cor = F.relu(self.convc2(cor))
pfm = F.relu(self.convp1(pose.view(bs, 6, 1, 1).repeat(1, 1, h, w)))
pfm = F.relu(self.convp2(pfm))
cor_pfm = torch.cat([cor, pfm], dim=1)
out_p = F.relu(self.convp(cor_pfm))
return torch.cat([out_p, pose.view(bs, 6, 1, 1).repeat(1, 1, h, w)],
dim=1)
def get_inputs():
return [torch.rand([4, 6, 1, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cost_dim': 4, 'hidden_dim': 256, 'out_chs': 8}]
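# Usage sketch (dims from get_init_inputs): the 6-vector pose is broadcast over
# the cost map's 4x4 grid, fused with cost features, and re-appended at the end.
if __name__ == '__main__':
    proj = ProjectionInputPose(cost_dim=4, hidden_dim=256, out_chs=8)
    out = proj(*get_inputs())
    print(out.shape)  # torch.Size([4, 8, 4, 4]): (out_chs - 6) fused + 6 pose channels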
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 320
x0 = xindex % 16
x2 = xindex // 5120
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 4096 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 320, tl.int64)
tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-256 + x1) + 1024 * x2), tmp12,
other=0.0)
tmp16 = tl.load(in_ptr3 + (-256 + x1), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x3, tmp21, None)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 32 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-2 + x1) + 96 * x2), tmp12 &
xmask, other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 2
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_7, (256, 6, 7, 7), (294, 49, 7, 1))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (64,), (1,))
assert_size_stride(primals_11, (2, 320, 3, 3), (2880, 9, 3, 1))
assert_size_stride(primals_12, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_3,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1))
buf3 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
triton_poi_fused_repeat_1[grid(384)](primals_6, buf3, 384, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_8,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf6 = extern_kernels.convolution(buf5, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 4, 4), (1024, 16, 4, 1))
buf7 = empty_strided_cuda((4, 320, 4, 4), (5120, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(20480)](buf2, primals_5, buf6,
primals_10, buf7, 20480, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 2, 4, 4), (32, 16, 4, 1))
buf9 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(512)](buf8, primals_12, buf3, buf9, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(128)](buf8,
primals_12, buf10, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_12
buf11 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(4096)](buf6
, primals_10, buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_10
buf12 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(16384)](
buf2, primals_5, buf12, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
del primals_5
return (buf9, primals_1, primals_2, primals_4, primals_7, primals_9,
primals_11, buf1, buf3, buf5, buf7, buf10, buf11, buf12)
class ProjectionInputPoseNew(nn.Module):
def __init__(self, cost_dim, hidden_dim, out_chs):
super().__init__()
self.out_chs = out_chs
self.convc1 = nn.Conv2d(cost_dim, hidden_dim, 1, padding=0)
self.convc2 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convp1 = nn.Conv2d(6, hidden_dim, 7, padding=3)
self.convp2 = nn.Conv2d(hidden_dim, 64, 3, padding=1)
self.convp = nn.Conv2d(64 + hidden_dim, out_chs - 6, 3, padding=1)
def forward(self, input_0, input_1):
primals_2 = self.convc1.weight
primals_3 = self.convc1.bias
primals_4 = self.convc2.weight
primals_5 = self.convc2.bias
primals_7 = self.convp1.weight
primals_8 = self.convp1.bias
primals_9 = self.convp2.weight
primals_10 = self.convp2.bias
primals_11 = self.convp.weight
primals_12 = self.convp.bias
primals_6 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
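# Parity sketch (assumes a CUDA device); argument order matches get_inputs:
# pose first, cost second.
if __name__ == '__main__':
    net = ProjectionInputPoseNew(cost_dim=4, hidden_dim=256, out_chs=8).cuda()
    pose = torch.rand(4, 6, 1, 1, device='cuda')
    cost = torch.rand(4, 4, 4, 4, device='cuda')
    print(net(pose, cost).shape)  # torch.Size([4, 8, 4, 4])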
|
aliyun/dro-sfm
|
ProjectionInputPose
| false
| 14,818
|
[
"MIT"
] | 147
|
8707e2e0ef799d7d47418a018060f503ef449fe3
|
https://github.com/aliyun/dro-sfm/tree/8707e2e0ef799d7d47418a018060f503ef449fe3
|
ResNetV2
|
import torch
from os.path import join as pjoin
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
bias=bias, groups=groups)
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv1 = conv1x1(cin, cmid, bias=False)
self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
self.conv3 = conv1x1(cmid, cout, bias=False)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride, bias=False)
self.gn_proj = nn.GroupNorm(cout, cout)
def forward(self, x):
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(x)
residual = self.gn_proj(residual)
y = self.relu(self.gn1(self.conv1(x)))
y = self.relu(self.gn2(self.conv2(y)))
y = self.gn3(self.conv3(y))
y = self.relu(residual + y)
return y
def load_from(self, weights, n_block, n_unit):
conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')
], conv=True)
conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')
], conv=True)
conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')
], conv=True)
gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
self.conv1.weight.copy_(conv1_weight)
self.conv2.weight.copy_(conv2_weight)
self.conv3.weight.copy_(conv3_weight)
self.gn1.weight.copy_(gn1_weight.view(-1))
self.gn1.bias.copy_(gn1_bias.view(-1))
self.gn2.weight.copy_(gn2_weight.view(-1))
self.gn2.bias.copy_(gn2_bias.view(-1))
self.gn3.weight.copy_(gn3_weight.view(-1))
self.gn3.bias.copy_(gn3_bias.view(-1))
if hasattr(self, 'downsample'):
proj_conv_weight = np2th(weights[pjoin(n_block, n_unit,
'conv_proj/kernel')], conv=True)
proj_gn_weight = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/scale')])
proj_gn_bias = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/bias')])
self.downsample.weight.copy_(proj_conv_weight)
self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
self.gn_proj.bias.copy_(proj_gn_bias.view(-1))
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor):
super().__init__()
width = int(64 * width_factor)
self.width = width
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width,
kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn.
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True
)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width *
4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width *
4, cout=width * 4, cmid=width)) for i in range(2, block_units[0
] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8,
cout=width * 8, cmid=width * 2)) for i in range(2, block_units[
1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16,
cout=width * 16, cmid=width * 4)) for i in range(2, block_units
[2] + 1)])))]))
def forward(self, x):
x = self.root(x)
x = self.body(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'block_units': [4, 4, 4], 'width_factor': 4}]
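# Usage sketch (illustrative, CPU-friendly): with width_factor=4 the root emits
# 256 channels and the three blocks widen to 1024/2048/4096 while downsampling;
# for a 64x64 input the expected output shape is (4, 4096, 4, 4).
if __name__ == '__main__':
    net = ResNetV2(block_units=[4, 4, 4], width_factor=4)
    print(net(torch.rand(4, 3, 64, 64)).shape)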
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 262144 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = xindex // 262144
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256 % 15
x2 = xindex // 3840 % 15
x3 = xindex // 57600
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp5 = tl.load(in_ptr0 + (8192 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp7 = tl.load(in_ptr0 + (8448 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp9 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp11 = tl.load(in_ptr0 + (16384 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (16640 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 225
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 1024
x1 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1024 * r2 + 230400 * x1), rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 225, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 225.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x3, tmp21, None)
tl.store(out_ptr0 + x3, tmp10, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 1800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 57600 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 1800.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 256
x2 = xindex // 57600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1800.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2304.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 230400 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 7200.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 230400
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 1024 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 1024 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 225.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 7200.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 230400
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 7200.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
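# Per-(batch, channel) statistics over 64 spatial positions: emits the mean,
# the sum of squared deviations, and rsqrt(var + 1e-5) for reuse by the fused
# residual kernel further below.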
@triton.jit
def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 2048
x1 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r2 + 131072 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 64.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x3, tmp18, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp13, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
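# Group-norm statistics via a Welford reduction: each of the 128 (batch,
# group) programs scans 3600 values (16 channels x 225 spatial) and stores
# the mean, the M2 sum, and rsqrt(var + 1e-6).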
@triton.jit
def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 3600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 115200 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 3600.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
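# Applies the group norm computed above plus a ReLU: normalize with the
# per-group mean and rsqrt(var + 1e-6), apply the per-channel scale and bias,
# then take max(x, 0).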
@triton.jit
def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 115200
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 3600.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
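# Two-pass standardization of 512 conv filters of 4608 elements (512 x 3 x 3):
# pass one runs Welford to get mean/var and stores sqrt(var + 1e-5); pass two
# rereads the weight and writes (w - mean) / std.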
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tmp5 = 4608.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask)
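# Single-pass group-norm statistics: each program reduces 1024 values
# (16 channels x 64 spatial) for one (batch, group) pair and stores the mean,
# the sum of squared deviations, and rsqrt(var + 1e-6).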
@triton.jit
def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = rindex // 16
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None)
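# Welford-based group-norm statistics over 4096 values (64 channels x 64
# spatial) per (batch, group) pair; stores mean, M2, and rsqrt(var + 1e-6).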
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = rindex // 64
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
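# Residual join of two normalized branches: in_ptr0 is normalized with the
# per-channel stats from above (var over 64 elements, eps 1e-5), in_ptr5 with
# per-group stats (var = M2 / 4096, eps 1e-6); both get their affine params,
# are summed, and pass through ReLU in place.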
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 2048 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 2048 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 4096.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
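# Standardizes 512 conv filters of 2048 elements each in two passes, storing
# sqrt(var + 1e-5) per filter and the normalized weight (w - mean) / std.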
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 4096.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)
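# Per-(batch, channel) statistics over 16 spatial positions: emits mean, sum
# of squared deviations, and rsqrt(var + 1e-5), consumed by the fused
# residual kernel below (which divides the raw sum of squares by 16).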
@triton.jit
def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 65536 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 16.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x3, tmp18, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp13, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
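# Two-pass standardization of 1024 conv filters of 9216 elements
# (1024 x 3 x 3): Welford mean/var first, then (w - mean) / sqrt(var + 1e-5).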
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tmp5 = 9216.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = rindex // 32
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = rindex // 128
tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 2048.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # discarded Welford weight (count) output
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
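# Same fused add + group norm + ReLU as above, but on a 2D (spatial, channel)
# grid: the input is read in channels-last order (x2 + 4096 * y3) and the
# result is written back in contiguous NCHW order (y0 + 16 * x2 + 65536 * y1).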
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = yindex // 16
y0 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + (32 * y1 + x2 // 128), ymask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * y1 + x2 // 128), ymask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1, 1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp17, ymask)
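# ReLU backward mask: marks positions where the forward output was <= 0 and
# stores the boolean result in a permuted layout, as needed by
# threshold_backward in autograd.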
@triton.jit
def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = yindex // 4096
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (y0 + 4096 * x2 + 65536 * y1), tmp2, xmask)
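# Entry point for the compiled graph: unpack the 121 parameters (conv weights
# plus group-norm scales and biases, with the (4, 3, 64, 64) input as
# primals_2), validate shapes and strides, then run the network by
# interleaving the Triton kernels above with extern convolution calls.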
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121) = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (1024,), (1,))
assert_size_stride(primals_7, (1024,), (1,))
assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_12, (256,), (1,))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (1024,), (1,))
assert_size_stride(primals_16, (1024,), (1,))
assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_18, (256,), (1,))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256,), (1,))
assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_24, (1024,), (1,))
assert_size_stride(primals_25, (1024,), (1,))
assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_27, (256,), (1,))
assert_size_stride(primals_28, (256,), (1,))
assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (256,), (1,))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_33, (1024,), (1,))
assert_size_stride(primals_34, (1024,), (1,))
assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_36, (256,), (1,))
assert_size_stride(primals_37, (256,), (1,))
assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (256,), (1,))
assert_size_stride(primals_40, (256,), (1,))
assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_42, (1024,), (1,))
assert_size_stride(primals_43, (1024,), (1,))
assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_45, (2048,), (1,))
assert_size_stride(primals_46, (2048,), (1,))
assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_48, (512,), (1,))
assert_size_stride(primals_49, (512,), (1,))
assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_51, (512,), (1,))
assert_size_stride(primals_52, (512,), (1,))
assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_54, (2048,), (1,))
assert_size_stride(primals_55, (2048,), (1,))
assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_57, (512,), (1,))
assert_size_stride(primals_58, (512,), (1,))
assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_60, (512,), (1,))
assert_size_stride(primals_61, (512,), (1,))
assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_63, (2048,), (1,))
assert_size_stride(primals_64, (2048,), (1,))
assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_66, (512,), (1,))
assert_size_stride(primals_67, (512,), (1,))
assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_69, (512,), (1,))
assert_size_stride(primals_70, (512,), (1,))
assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_72, (2048,), (1,))
assert_size_stride(primals_73, (2048,), (1,))
assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_75, (512,), (1,))
assert_size_stride(primals_76, (512,), (1,))
assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_78, (512,), (1,))
assert_size_stride(primals_79, (512,), (1,))
assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_81, (2048,), (1,))
assert_size_stride(primals_82, (2048,), (1,))
assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_84, (4096,), (1,))
assert_size_stride(primals_85, (4096,), (1,))
assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_87, (1024,), (1,))
assert_size_stride(primals_88, (1024,), (1,))
assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_90, (1024,), (1,))
assert_size_stride(primals_91, (1024,), (1,))
assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_93, (4096,), (1,))
assert_size_stride(primals_94, (4096,), (1,))
assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_96, (1024,), (1,))
assert_size_stride(primals_97, (1024,), (1,))
assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_99, (1024,), (1,))
assert_size_stride(primals_100, (1024,), (1,))
assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_102, (4096,), (1,))
assert_size_stride(primals_103, (4096,), (1,))
assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_105, (1024,), (1,))
assert_size_stride(primals_106, (1024,), (1,))
assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_108, (1024,), (1,))
assert_size_stride(primals_109, (1024,), (1,))
assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_111, (4096,), (1,))
assert_size_stride(primals_112, (4096,), (1,))
assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_114, (1024,), (1,))
assert_size_stride(primals_115, (1024,), (1,))
assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_117, (1024,), (1,))
assert_size_stride(primals_118, (1024,), (1,))
assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_120, (4096,), (1,))
assert_size_stride(primals_121, (4096,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_11, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_11
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_20, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_29, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_29
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_38, buf5, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_38
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_50, buf6, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_50
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_59, buf7, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_59
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_68, buf8, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_68
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_77, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_77
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_89, buf10, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_89
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_98, buf11, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_98
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_107, buf12, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_107
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_116, buf13, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_116
buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf15
buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
triton_per_fused_add_div_sqrt_sub_var_mean_5[grid(256)](buf17, buf0,
buf18, 256, 147, XBLOCK=1, num_warps=2, num_stages=1)
buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_6[grid(128)](buf19, buf20, buf21,
buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_7[grid(1048576)](buf19,
buf20, buf21, primals_3, primals_4, buf24, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_4
buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf24,
buf25, buf26, 230400, XBLOCK=512, num_warps=8, num_stages=1)
buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf28
buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf30,
primals_5, buf31, 1024, 256, num_warps=2, num_stages=1)
buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
triton_per_fused_native_group_norm_10[grid(4096)](buf32, buf33,
buf34, buf36, 4096, 225, XBLOCK=1, num_warps=2, num_stages=1)
buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf38
buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(256)](buf40,
primals_8, buf41, 256, 256, num_warps=2, num_stages=1)
buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf43 = buf21
del buf21
buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf42, buf43,
buf44, buf46, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf42,
buf43, buf44, primals_9, primals_10, buf47, 230400, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_10
buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf49
buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf51,
buf2, buf52, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf54 = buf44
del buf44
buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf53, buf54,
buf55, buf57, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf53,
buf54, buf55, primals_12, primals_13, buf58, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_13
buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf60
buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf62,
primals_14, buf63, 1024, 256, num_warps=2, num_stages=1)
buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf65 = buf55
del buf55
buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_15[grid(128)](buf64, buf65,
buf66, buf68, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
buf70 = buf69
del buf69
triton_poi_fused_add_native_group_norm_relu_16[grid(921600)](buf70,
buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66,
primals_15, primals_16, 921600, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_16
del primals_7
buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf72
buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf74,
primals_17, buf75, 256, 1024, num_warps=8, num_stages=1)
buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf77 = buf66
del buf66
buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf76, buf77,
buf78, buf80, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf76,
buf77, buf78, primals_18, primals_19, buf81, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_19
buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf83
buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf85,
buf3, buf86, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf88 = buf78
del buf78
buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf87, buf88,
buf89, buf91, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf87,
buf88, buf89, primals_21, primals_22, buf92, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_22
buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf94
buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf96,
primals_23, buf97, 1024, 256, num_warps=2, num_stages=1)
buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf99 = buf89
del buf89
buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf98, buf99,
buf100, buf102, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf70,
buf98, buf99, buf100, primals_24, primals_25, buf103, 921600,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf105
buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf107,
primals_26, buf108, 256, 1024, num_warps=8, num_stages=1)
buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf110 = buf100
del buf100
buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf109, buf110,
buf111, buf113, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf109,
buf110, buf111, primals_27, primals_28, buf114, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_28
buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf116
buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf118,
buf4, buf119, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf121 = buf111
del buf111
buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf120, buf121,
buf122, buf124, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf120,
buf121, buf122, primals_30, primals_31, buf125, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_31
buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf127
buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf129,
primals_32, buf130, 1024, 256, num_warps=2, num_stages=1)
buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf132 = buf122
del buf122
buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf131, buf132,
buf133, buf135, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf103,
buf131, buf132, buf133, primals_33, primals_34, buf136, 921600,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_34
buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf138
buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf140,
primals_35, buf141, 256, 1024, num_warps=8, num_stages=1)
buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf143 = buf133
del buf133
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf142, buf143,
buf144, buf146, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf142,
buf143, buf144, primals_36, primals_37, buf147, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_37
buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf149
buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf151,
buf5, buf152, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf154 = buf144
del buf144
buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf157 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf153, buf154,
buf155, buf157, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf153,
buf154, buf155, primals_39, primals_40, buf158, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_40
buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf160
buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf162,
primals_41, buf163, 1024, 256, num_warps=2, num_stages=1)
buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf165 = buf155
del buf155
buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf164, buf165,
buf166, buf168, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf136,
buf164, buf165, buf166, primals_42, primals_43, buf169, 921600,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf171
buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf173,
primals_44, buf174, 2048, 1024, num_warps=8, num_stages=1)
buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
triton_per_fused_native_group_norm_20[grid(8192)](buf175, buf176,
buf177, buf179, 8192, 64, XBLOCK=8, num_warps=4, num_stages=1)
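        # Projection shortcut for the stride-2 transition into block2:
        # standardized 1x1 conv to 2048 channels (buf175), then per-channel
        # GroupNorm statistics (gn_proj uses num_groups == num_channels,
        # hence grid(8192) = 4 * 2048).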
buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf181
buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_21[grid(512)](buf183,
primals_47, buf184, 512, 1024, num_warps=8, num_stages=1)
buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512))
buf186 = buf166
del buf166
buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_22[grid(128)](buf185, buf186,
buf187, buf189, 128, 3600, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512
), torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(460800)](buf185,
buf186, buf187, primals_48, primals_49, buf190, 460800, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_49
buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf192
buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf194,
buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf197 = buf187
del buf187
buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf196, buf197,
buf198, buf200, 128, 1024, num_warps=8, num_stages=1)
buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf196,
buf197, buf198, primals_51, primals_52, buf201, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_52
buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf203
buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf205,
primals_53, buf206, 2048, 512, num_warps=4, num_stages=1)
buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf208 = buf198
del buf198
buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf207, buf208,
buf209, buf211, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
buf213 = buf212
del buf212
triton_poi_fused_add_native_group_norm_relu_29[grid(524288)](buf213,
buf175, buf176, buf177, primals_45, primals_46, buf207, buf208,
buf209, primals_54, primals_55, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf177
del primals_46
del primals_55
buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf215
buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217,
primals_56, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf220 = buf209
del buf209
buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf219, buf220,
buf221, buf223, 128, 1024, num_warps=8, num_stages=1)
buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf219,
buf220, buf221, primals_57, primals_58, buf224, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_58
buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf226
buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf228,
buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf231 = buf221
del buf221
buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf230, buf231,
buf232, buf234, 128, 1024, num_warps=8, num_stages=1)
buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf230,
buf231, buf232, primals_60, primals_61, buf235, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_61
buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf237
buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf239,
primals_62, buf240, 2048, 512, num_warps=4, num_stages=1)
buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf242 = buf232
del buf232
buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf241, buf242,
buf243, buf245, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf213,
buf241, buf242, buf243, primals_63, primals_64, buf246, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_64
buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf248
buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf250,
primals_65, buf251, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf253 = buf243
del buf243
buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf252, buf253,
buf254, buf256, 128, 1024, num_warps=8, num_stages=1)
buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf252,
buf253, buf254, primals_66, primals_67, buf257, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_67
buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf259
buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf261,
buf8, buf262, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf264 = buf254
del buf254
buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf263, buf264,
buf265, buf267, 128, 1024, num_warps=8, num_stages=1)
buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf263,
buf264, buf265, primals_69, primals_70, buf268, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_70
buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf270
buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf272,
primals_71, buf273, 2048, 512, num_warps=4, num_stages=1)
buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf275 = buf265
del buf265
buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf274, buf275,
buf276, buf278, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf279 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf246,
buf274, buf275, buf276, primals_72, primals_73, buf279, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_73
buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf281
buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf283,
primals_74, buf284, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf286 = buf276
del buf276
buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf285, buf286,
buf287, buf289, 128, 1024, num_warps=8, num_stages=1)
buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf285,
buf286, buf287, primals_75, primals_76, buf290, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_76
buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf292
buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf294,
buf9, buf295, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf297 = buf287
del buf287
buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf296, buf297,
buf298, buf300, 128, 1024, num_warps=8, num_stages=1)
buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf296,
buf297, buf298, primals_78, primals_79, buf301, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_79
buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf303
buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf305,
primals_80, buf306, 2048, 512, num_warps=4, num_stages=1)
buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf308 = buf298
del buf298
buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf307, buf308,
buf309, buf311, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf279,
buf307, buf308, buf309, primals_81, primals_82, buf312, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_82
buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096,
4096), 0)
del buf34
buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf314
buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf316,
primals_83, buf317, 4096, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
triton_per_fused_native_group_norm_33[grid(16384)](buf318, buf319,
buf320, buf322, 16384, 16, XBLOCK=32, num_warps=4, num_stages=1)
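        # Same shortcut pattern for the transition into block3: 1x1 stride-2
        # conv to 4096 channels, with per-channel stats computed over
        # grid(16384) = 4 * 4096.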
buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf324
buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_34[grid(1024)](buf326,
primals_86, buf327, 1024, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf329 = buf309
del buf309
buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_35[grid(128)](buf328, buf329,
buf330, buf332, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_36[grid(262144)](buf328,
buf329, buf330, primals_87, primals_88, buf333, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_88
buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf335
buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf337,
buf10, buf338, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf340 = buf330
del buf330
buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf339, buf340,
buf341, buf343, 128, 512, num_warps=4, num_stages=1)
buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf339,
buf340, buf341, primals_90, primals_91, buf344, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_91
buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf346
buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf348,
primals_92, buf349, 4096, 1024, num_warps=8, num_stages=1)
buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf351 = buf341
del buf341
buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351,
buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
buf356 = buf355
del buf355
triton_poi_fused_add_native_group_norm_relu_42[grid(262144)](buf356,
buf318, buf319, buf320, primals_84, primals_85, buf350, buf351,
buf352, primals_93, primals_94, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del buf320
del primals_85
del primals_94
buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf358
buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf360,
primals_95, buf361, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf363 = buf352
del buf352
buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf362, buf363,
buf364, buf366, 128, 512, num_warps=4, num_stages=1)
buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf362,
buf363, buf364, primals_96, primals_97, buf367, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_97
buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf369
buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf371,
buf11, buf372, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf374 = buf364
del buf364
buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf373, buf374,
buf375, buf377, 128, 512, num_warps=4, num_stages=1)
buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf373,
buf374, buf375, primals_99, primals_100, buf378, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_100
buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf380
buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf382,
primals_101, buf383, 4096, 1024, num_warps=8, num_stages=1)
buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf385 = buf375
del buf375
buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385,
buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf356,
buf384, buf385, buf386, primals_102, primals_103, buf389,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_103
buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf391
buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393,
primals_104, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf396 = buf386
del buf386
buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf395, buf396,
buf397, buf399, 128, 512, num_warps=4, num_stages=1)
buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf395,
buf396, buf397, primals_105, primals_106, buf400, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_106
buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf402
buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf404,
buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf407 = buf397
del buf397
buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf406, buf407,
buf408, buf410, 128, 512, num_warps=4, num_stages=1)
buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf406,
buf407, buf408, primals_108, primals_109, buf411, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_109
buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf413
buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf415,
primals_110, buf416, 4096, 1024, num_warps=8, num_stages=1)
buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf418 = buf408
del buf408
buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf421 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf417, buf418,
buf419, buf421, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf389,
buf417, buf418, buf419, primals_111, primals_112, buf422,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_112
buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf424
buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf426,
primals_113, buf427, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf429 = buf419
del buf419
buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf428, buf429,
buf430, buf432, 128, 512, num_warps=4, num_stages=1)
buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf428,
buf429, buf430, primals_114, primals_115, buf433, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_115
buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf435
buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf437,
buf13, buf438, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf440 = buf430
del buf430
buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf439, buf440,
buf441, buf443, 128, 512, num_warps=4, num_stages=1)
buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf439,
buf440, buf441, primals_117, primals_118, buf444, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_118
buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf446
buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf448,
primals_119, buf449, 4096, 1024, num_warps=8, num_stages=1)
buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf451 = buf441
del buf441
buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf450, buf451,
buf452, buf454, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1),
torch.float32)
triton_poi_fused_add_native_group_norm_relu_45[grid(64, 4096)](buf422,
buf450, buf451, buf452, primals_120, primals_121, buf455, 64,
4096, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del buf452
del primals_121
buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.bool)
triton_poi_fused_threshold_backward_46[grid(16384, 16)](buf455,
buf456, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
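        # buf455 is the final block3 activation, (4, 4096, 4, 4) in contiguous
        # NCHW layout; buf456 is the boolean ReLU mask kept for the threshold
        # backward pass. The remaining tuple entries are cached for autograd.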
return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8,
primals_9, buf2, primals_12, primals_14, primals_15, primals_17,
primals_18, buf3, primals_21, primals_23, primals_24, primals_26,
primals_27, buf4, primals_30, primals_32, primals_33, primals_35,
primals_36, buf5, primals_39, primals_41, primals_42, primals_44,
primals_45, primals_47, primals_48, buf6, primals_51, primals_53,
primals_54, primals_56, primals_57, buf7, primals_60, primals_62,
primals_63, primals_65, primals_66, buf8, primals_69, primals_71,
primals_72, primals_74, primals_75, buf9, primals_78, primals_80,
primals_81, primals_83, primals_84, primals_86, primals_87, buf10,
primals_90, primals_92, primals_93, primals_95, primals_96, buf11,
primals_99, primals_101, primals_102, primals_104, primals_105,
buf12, primals_108, primals_110, primals_111, primals_113,
primals_114, buf13, primals_117, primals_119, primals_120, buf17,
buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0),
reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26,
buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1),
0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40,
buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0),
reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52,
buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0),
reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63,
buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0),
reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75,
buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0),
reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86,
buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0),
reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97,
buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0),
reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107,
buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0),
reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118,
buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0),
reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129,
buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0),
reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140,
buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0),
reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151,
buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0),
reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162,
buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0),
reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173,
buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0),
reinterpret_tensor(buf179, (4, 2048), (2048, 1), 0), buf183, buf184,
buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0),
reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194,
buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0),
reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205,
buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0),
reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217,
buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0),
reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228,
buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0),
reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239,
buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0),
reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250,
buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0),
reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261,
buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0),
reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272,
buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0),
reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283,
buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0),
reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294,
buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0),
reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305,
buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0),
reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316,
buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0),
reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327,
buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0),
reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337,
buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0),
reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348,
buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0),
reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360,
buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0),
reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371,
buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0),
reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382,
buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0),
reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393,
buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0),
reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404,
buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0),
reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415,
buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0),
reinterpret_tensor(buf421, (4, 32), (32, 1), 0), buf422, buf426,
buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0),
reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437,
buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0),
reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448,
buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0),
reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
bias=bias, groups=groups)
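# conv1x1 / conv3x3 are thin wrappers around StdConv2d (defined below); the
# forward reference is harmless because the helpers only run at model-build
# time, after the whole module has been executed.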
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
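# Illustrative shapes (assumed, not from the source): a TF/JAX kernel stored
# HWIO with shape (3, 3, 64, 256) becomes OIHW (256, 64, 3, 3) after
# np2th(w, conv=True), matching the layout of nn.Conv2d.weight.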
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
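# Weight standardization: each output filter is normalized over its
# (in, kH, kW) axes, w' = (w - mean) / sqrt(var + 1e-5), before the usual
# convolution. Hypothetical usage (not part of the source), API-compatible
# with nn.Conv2d:
#
#   conv = StdConv2d(3, 8, kernel_size=3, padding=1)
#   y = conv(torch.randn(1, 3, 15, 15))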
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv1 = conv1x1(cin, cmid, bias=False)
self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
self.conv3 = conv1x1(cmid, cout, bias=False)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride, bias=False)
self.gn_proj = nn.GroupNorm(cout, cout)
def forward(self, x):
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(x)
residual = self.gn_proj(residual)
y = self.relu(self.gn1(self.conv1(x)))
y = self.relu(self.gn2(self.conv2(y)))
y = self.gn3(self.conv3(y))
y = self.relu(residual + y)
return y
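    # Data flow (annotation): GroupNorm + ReLU precede every conv
    # (pre-activation ordering); the 1x1 -> 3x3 -> 1x1 stack maps
    # cin -> cmid -> cmid -> cout, and the projection shortcut fires
    # whenever stride != 1 or cin != cout.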
def load_from(self, weights, n_block, n_unit):
conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')
], conv=True)
conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')
], conv=True)
conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')
], conv=True)
gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
self.conv1.weight.copy_(conv1_weight)
self.conv2.weight.copy_(conv2_weight)
self.conv3.weight.copy_(conv3_weight)
self.gn1.weight.copy_(gn1_weight.view(-1))
self.gn1.bias.copy_(gn1_bias.view(-1))
self.gn2.weight.copy_(gn2_weight.view(-1))
self.gn2.bias.copy_(gn2_bias.view(-1))
self.gn3.weight.copy_(gn3_weight.view(-1))
self.gn3.bias.copy_(gn3_bias.view(-1))
if hasattr(self, 'downsample'):
proj_conv_weight = np2th(weights[pjoin(n_block, n_unit,
'conv_proj/kernel')], conv=True)
proj_gn_weight = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/scale')])
proj_gn_bias = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/bias')])
self.downsample.weight.copy_(proj_conv_weight)
self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
self.gn_proj.bias.copy_(proj_gn_bias.view(-1))
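        # Assumption: callers wrap load_from in torch.no_grad(), since the
        # copy_ calls above mutate parameters in place.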
class ResNetV2New(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor):
super().__init__()
width = int(64 * width_factor)
self.width = width
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width,
kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn.
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True
)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width *
4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width *
4, cout=width * 4, cmid=width)) for i in range(2, block_units[0
] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8,
cout=width * 8, cmid=width * 2)) for i in range(2, block_units[
1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16,
cout=width * 16, cmid=width * 4)) for i in range(2, block_units
[2] + 1)])))]))
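    # Stage layout (annotation): block1 keeps spatial size and maps
    # width -> 4 * width; block2 and block3 each downsample by 2 in their
    # first unit while widening to 8 * width and 16 * width respectively.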
def forward(self, input_0):
primals_1 = self.root.conv.weight
primals_3 = self.root.gn.weight
primals_4 = self.root.gn.bias
primals_9 = self.body.block1.unit1.gn1.weight
primals_10 = self.body.block1.unit1.gn1.bias
primals_8 = self.body.block1.unit1.conv1.weight
primals_12 = self.body.block1.unit1.gn2.weight
primals_13 = self.body.block1.unit1.gn2.bias
primals_11 = self.body.block1.unit1.conv2.weight
primals_6 = self.body.block1.unit1.gn3.weight
primals_7 = self.body.block1.unit1.gn3.bias
primals_5 = self.body.block1.unit1.conv3.weight
primals_14 = self.body.block1.unit1.downsample.weight
primals_15 = self.body.block1.unit1.gn_proj.weight
primals_16 = self.body.block1.unit1.gn_proj.bias
primals_18 = self.body.block1.unit2.gn1.weight
primals_19 = self.body.block1.unit2.gn1.bias
primals_17 = self.body.block1.unit2.conv1.weight
primals_21 = self.body.block1.unit2.gn2.weight
primals_22 = self.body.block1.unit2.gn2.bias
primals_20 = self.body.block1.unit2.conv2.weight
primals_24 = self.body.block1.unit2.gn3.weight
primals_25 = self.body.block1.unit2.gn3.bias
primals_23 = self.body.block1.unit2.conv3.weight
primals_27 = self.body.block1.unit3.gn1.weight
primals_28 = self.body.block1.unit3.gn1.bias
primals_26 = self.body.block1.unit3.conv1.weight
primals_30 = self.body.block1.unit3.gn2.weight
primals_31 = self.body.block1.unit3.gn2.bias
primals_29 = self.body.block1.unit3.conv2.weight
primals_33 = self.body.block1.unit3.gn3.weight
primals_34 = self.body.block1.unit3.gn3.bias
primals_32 = self.body.block1.unit3.conv3.weight
primals_36 = self.body.block1.unit4.gn1.weight
primals_37 = self.body.block1.unit4.gn1.bias
primals_35 = self.body.block1.unit4.conv1.weight
primals_39 = self.body.block1.unit4.gn2.weight
primals_40 = self.body.block1.unit4.gn2.bias
primals_38 = self.body.block1.unit4.conv2.weight
primals_42 = self.body.block1.unit4.gn3.weight
primals_43 = self.body.block1.unit4.gn3.bias
primals_41 = self.body.block1.unit4.conv3.weight
primals_48 = self.body.block2.unit1.gn1.weight
primals_49 = self.body.block2.unit1.gn1.bias
primals_47 = self.body.block2.unit1.conv1.weight
primals_51 = self.body.block2.unit1.gn2.weight
primals_52 = self.body.block2.unit1.gn2.bias
primals_50 = self.body.block2.unit1.conv2.weight
primals_45 = self.body.block2.unit1.gn3.weight
primals_46 = self.body.block2.unit1.gn3.bias
primals_53 = self.body.block2.unit1.conv3.weight
primals_44 = self.body.block2.unit1.downsample.weight
primals_54 = self.body.block2.unit1.gn_proj.weight
primals_55 = self.body.block2.unit1.gn_proj.bias
primals_57 = self.body.block2.unit2.gn1.weight
primals_58 = self.body.block2.unit2.gn1.bias
primals_56 = self.body.block2.unit2.conv1.weight
primals_60 = self.body.block2.unit2.gn2.weight
primals_61 = self.body.block2.unit2.gn2.bias
primals_59 = self.body.block2.unit2.conv2.weight
primals_63 = self.body.block2.unit2.gn3.weight
primals_64 = self.body.block2.unit2.gn3.bias
primals_62 = self.body.block2.unit2.conv3.weight
primals_66 = self.body.block2.unit3.gn1.weight
primals_67 = self.body.block2.unit3.gn1.bias
primals_65 = self.body.block2.unit3.conv1.weight
primals_69 = self.body.block2.unit3.gn2.weight
primals_70 = self.body.block2.unit3.gn2.bias
primals_68 = self.body.block2.unit3.conv2.weight
primals_72 = self.body.block2.unit3.gn3.weight
primals_73 = self.body.block2.unit3.gn3.bias
primals_71 = self.body.block2.unit3.conv3.weight
primals_75 = self.body.block2.unit4.gn1.weight
primals_76 = self.body.block2.unit4.gn1.bias
primals_74 = self.body.block2.unit4.conv1.weight
primals_78 = self.body.block2.unit4.gn2.weight
primals_79 = self.body.block2.unit4.gn2.bias
primals_77 = self.body.block2.unit4.conv2.weight
primals_81 = self.body.block2.unit4.gn3.weight
primals_82 = self.body.block2.unit4.gn3.bias
primals_80 = self.body.block2.unit4.conv3.weight
primals_87 = self.body.block3.unit1.gn1.weight
primals_88 = self.body.block3.unit1.gn1.bias
primals_86 = self.body.block3.unit1.conv1.weight
primals_90 = self.body.block3.unit1.gn2.weight
primals_91 = self.body.block3.unit1.gn2.bias
primals_89 = self.body.block3.unit1.conv2.weight
primals_84 = self.body.block3.unit1.gn3.weight
primals_85 = self.body.block3.unit1.gn3.bias
primals_92 = self.body.block3.unit1.conv3.weight
primals_83 = self.body.block3.unit1.downsample.weight
primals_93 = self.body.block3.unit1.gn_proj.weight
primals_94 = self.body.block3.unit1.gn_proj.bias
primals_96 = self.body.block3.unit2.gn1.weight
primals_97 = self.body.block3.unit2.gn1.bias
primals_95 = self.body.block3.unit2.conv1.weight
primals_99 = self.body.block3.unit2.gn2.weight
primals_100 = self.body.block3.unit2.gn2.bias
primals_98 = self.body.block3.unit2.conv2.weight
primals_102 = self.body.block3.unit2.gn3.weight
primals_103 = self.body.block3.unit2.gn3.bias
primals_101 = self.body.block3.unit2.conv3.weight
primals_105 = self.body.block3.unit3.gn1.weight
primals_106 = self.body.block3.unit3.gn1.bias
primals_104 = self.body.block3.unit3.conv1.weight
primals_108 = self.body.block3.unit3.gn2.weight
primals_109 = self.body.block3.unit3.gn2.bias
primals_107 = self.body.block3.unit3.conv2.weight
primals_111 = self.body.block3.unit3.gn3.weight
primals_112 = self.body.block3.unit3.gn3.bias
primals_110 = self.body.block3.unit3.conv3.weight
primals_114 = self.body.block3.unit4.gn1.weight
primals_115 = self.body.block3.unit4.gn1.bias
primals_113 = self.body.block3.unit4.conv1.weight
primals_117 = self.body.block3.unit4.gn2.weight
primals_118 = self.body.block3.unit4.gn2.bias
primals_116 = self.body.block3.unit4.conv2.weight
primals_120 = self.body.block3.unit4.gn3.weight
primals_121 = self.body.block3.unit4.gn3.bias
primals_119 = self.body.block3.unit4.conv3.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121])
return output[0]
|
Willy0919/progressive-coordinate-transforms
|
ResNetV2
| false
| 14,819
|
[
"Apache-2.0",
"MIT"
] | 142
|
b637fa2541a815d270e162a4c9cd3348b098d48a
|
https://github.com/Willy0919/progressive-coordinate-transforms/tree/b637fa2541a815d270e162a4c9cd3348b098d48a
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency
|
import torch
import torch.nn
import torch.onnx
import torch.utils.checkpoint
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency(torch.
nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency,
self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.softmax = torch.nn.Softmax(dim=1)
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out1 = self.softmax(out1)
out2 = self.fc2(out1)
return out1, out2
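    # Note: the outputs are chained: out1 = softmax(fc1(input1 + input2))
    # and out2 = fc2(out1), which is the dependency named in the class.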
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
import torch.onnx
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
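# Elementwise kernel: adds the two (4, 4, 4, 4) inputs, flattened to 256
# contiguous elements and processed in XBLOCK-sized tiles.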
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
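# Softmax pass 1 over dim=1: for each position, load the four slices that sit
# 16 elements apart within a 64-element batch chunk, subtract their max for
# numerical stability, and store exp(x - max).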
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
del buf2
extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_6
return buf3, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf3, primals_5
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew(torch
.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.softmax = torch.nn.Softmax(dim=1)
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
almiliMSFT/onnxruntime
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency
| false
| 14,820
|
[
"MIT"
] | 6,036
|
c002dc86a364852859ca9642698fcfc5edf22c9d
|
https://github.com/almiliMSFT/onnxruntime/tree/c002dc86a364852859ca9642698fcfc5edf22c9d
|
TV_L1Loss
|
import torch
import torch.nn as nn
import torch.utils.data
class TV_L1Loss(nn.Module):
    def __init__(self, tv_loss_weight=1):
        super(TV_L1Loss, self).__init__()
        # note: tv_loss_weight is accepted for API compatibility but never used in forward
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self.tensor_size(x[:, :, 1:, :])
count_w = self.tensor_size(x[:, :, :, 1:])
h_tv = torch.abs(x[:, :, 1:, :] - x[:, :, :h_x - 1, :]).sum()
w_tv = torch.abs(x[:, :, :, 1:] - x[:, :, :, :w_x - 1]).sum()
return (h_tv / count_h + w_tv / count_w) / batch_size
def tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
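A quick arithmetic check (a sketch, not part of the record) tying the normalizers above to the constants that appear in the fused Triton kernel below for the 4x4x4x4 test input:
C, H, W, B = 4, 4, 4, 4
count_h = C * (H - 1) * W  # 48 vertical-difference elements per sample
count_w = C * H * (W - 1)  # 48 as well, by symmetry of this input
assert 1 / count_h == 0.020833333333333332  # tmp16 in the kernel below
assert 1 / B == 0.25                        # tmp20, the batch_size division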
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TV_L1LossNew(nn.Module):
def __init__(self, tv_loss_weight=1):
super(TV_L1LossNew, self).__init__()
def tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
alsgkals2/SRResCGAN
|
TV_L1Loss
| false
| 14,821
|
[
"MIT"
] | 81
|
a71201a93e1819045f9c7711743812546d3a1f31
|
https://github.com/alsgkals2/SRResCGAN/tree/a71201a93e1819045f9c7711743812546d3a1f31
|
GraphLearner
|
from torch.nn import Module
import torch
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.functional as F
class GraphLearner(Module):
def __init__(self, in_feature_dim, combined_feature_dim, K, dropout=0.0):
super(GraphLearner, self).__init__()
"""
## Variables:
- in_feature_dim: dimensionality of input features
- combined_feature_dim: dimensionality of the joint hidden embedding
- K: number of graph nodes/objects on the image
"""
self.in_dim = in_feature_dim
self.combined_dim = combined_feature_dim
self.K = K
self.edge_layer_1 = nn.Linear(in_feature_dim, combined_feature_dim)
self.edge_layer_2 = nn.Linear(combined_feature_dim,
combined_feature_dim)
self.dropout = nn.Dropout(p=dropout)
self.edge_layer_1 = nn.utils.weight_norm(self.edge_layer_1)
self.edge_layer_2 = nn.utils.weight_norm(self.edge_layer_2)
def forward(self, graph_nodes):
"""
## Inputs:
- graph_nodes (batch_size, K, in_feat_dim): input features
## Returns:
- adjacency matrix (batch_size, K, K)
"""
graph_nodes = graph_nodes.view(-1, self.in_dim)
h = self.edge_layer_1(graph_nodes)
h = F.relu(h)
h = self.edge_layer_2(h)
h = F.relu(h)
h = h.view(-1, self.K, self.combined_dim)
adjacency_matrix = torch.matmul(h, h.transpose(1, 2))
return adjacency_matrix
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature_dim': 4, 'combined_feature_dim': 4, 'K': 4}]
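A shape and symmetry sketch (not part of the record; assumes GraphLearner above is in scope). Because the adjacency matrix is h @ h^T, every output matrix is symmetric:
import torch
learner = GraphLearner(in_feature_dim=4, combined_feature_dim=4, K=4)
adj = learner(torch.rand(4, 4, 4, 4))  # 64 node rows regrouped into 16 graphs of K=4 nodes
assert adj.shape == (16, 4, 4)
assert torch.allclose(adj, adj.transpose(1, 2), atol=1e-6)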
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn.modules.module import Module
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_3, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_3,
primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_2[grid(256)](buf3, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_6, buf4,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_6,
primals_5, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(buf5, (4, 4), (1, 4), 0),
out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_2[grid(256)](buf7, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), out=buf8)
return (buf8, buf1, buf5, primals_2, primals_3, primals_5, primals_6,
reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, buf3, buf4,
buf7, buf5)
class GraphLearnerNew(Module):
def __init__(self, in_feature_dim, combined_feature_dim, K, dropout=0.0):
super(GraphLearnerNew, self).__init__()
"""
## Variables:
- in_feature_dim: dimensionality of input features
- combined_feature_dim: dimensionality of the joint hidden embedding
- K: number of graph nodes/objects on the image
"""
self.in_dim = in_feature_dim
self.combined_dim = combined_feature_dim
self.K = K
self.edge_layer_1 = nn.Linear(in_feature_dim, combined_feature_dim)
self.edge_layer_2 = nn.Linear(combined_feature_dim,
combined_feature_dim)
self.dropout = nn.Dropout(p=dropout)
self.edge_layer_1 = nn.utils.weight_norm(self.edge_layer_1)
self.edge_layer_2 = nn.utils.weight_norm(self.edge_layer_2)
def forward(self, input_0):
primals_4 = self.edge_layer_1.bias
primals_2 = self.edge_layer_1.weight_g
primals_3 = self.edge_layer_1.weight_v
primals_7 = self.edge_layer_2.bias
primals_5 = self.edge_layer_2.weight_g
primals_6 = self.edge_layer_2.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
aimbrain/vqa-project
|
GraphLearner
| false
| 14,822
|
[
"Apache-2.0"
] | 145
|
341122a267293017b55db4f033fbe81445af03ea
|
https://github.com/aimbrain/vqa-project/tree/341122a267293017b55db4f033fbe81445af03ea
|
LSTMRegressCriterion
|
import torch
import torch.nn as nn
class LSTMRegressCriterion(nn.Module):
def __init__(self):
super(LSTMRegressCriterion, self).__init__()
def forward(self, pred, target, mask):
pred = pred.clone()
target = target.clone()
mask = mask.clone()
target = target[:, :pred.size(1), :]
mask = mask[:, :pred.size(1), :]
diff = 0.5 * (pred - target) ** 2
diff = diff * mask
output = torch.sum(diff) / torch.sum(mask)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
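A hand-checkable example (a sketch, not part of the record): the criterion is a masked mean of 0.5 * (pred - target)^2, so constant inputs give an exact value:
import torch
crit = LSTMRegressCriterion()
pred = torch.zeros(2, 1, 1)
target = torch.ones(2, 1, 1)
mask = torch.ones(2, 1, 1)
# each element contributes 0.5 * (0 - 1)^2 = 0.5; sum 1.0 / sum(mask) 2.0 = 0.5
assert torch.isclose(crit(pred, target, mask), torch.tensor(0.5))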
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = tl.broadcast_to(tmp6, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = tmp10 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class LSTMRegressCriterionNew(nn.Module):
def __init__(self):
super(LSTMRegressCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
aluo-x/shape2prog
|
LSTMRegressCriterion
| false
| 14,823
|
[
"BSD-2-Clause"
] | 109
|
1177e5205b99bb293e353688b564c94a14211c75
|
https://github.com/aluo-x/shape2prog/tree/1177e5205b99bb293e353688b564c94a14211c75
|
ResidualBlock
|
import torch
import torch.nn as nn
import torch.utils.data
class ResidualBlock(nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, x):
residual = self.conv1(x)
residual = self.prelu(residual)
residual = self.conv2(residual)
return x + residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
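A sketch (not part of the record) of the residual structure: zeroing the second convolution turns the whole block into an identity map, since forward returns x + residual:
import torch
block = ResidualBlock(channels=4)
with torch.no_grad():
    block.conv2.weight.zero_()
    block.conv2.bias.zero_()
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(block(x), x)  # residual branch contributes exactly zero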
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1,
primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_1[grid(256)](buf4, primals_3,
primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
return buf4, primals_1, primals_3, primals_4, primals_5, buf1, buf2
class ResidualBlockNew(nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.prelu.weight
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
alsgkals2/SRResCGAN
|
ResidualBlock
| false
| 14,824
|
[
"MIT"
] | 81
|
a71201a93e1819045f9c7711743812546d3a1f31
|
https://github.com/alsgkals2/SRResCGAN/tree/a71201a93e1819045f9c7711743812546d3a1f31
|
TV_L2Loss
|
import torch
import torch.nn as nn
import torch.utils.data
class TV_L2Loss(nn.Module):
def __init__(self):
super(TV_L2Loss, self).__init__()
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self.tensor_size(x[:, :, 1:, :])
count_w = self.tensor_size(x[:, :, :, 1:])
h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
return (h_tv / count_h + w_tv / count_w) / batch_size
def tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TV_L2LossNew(nn.Module):
def __init__(self):
super(TV_L2LossNew, self).__init__()
def tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
alsgkals2/SRResCGAN
|
TV_L2Loss
| false
| 14,825
|
[
"MIT"
] | 81
|
a71201a93e1819045f9c7711743812546d3a1f31
|
https://github.com/alsgkals2/SRResCGAN/tree/a71201a93e1819045f9c7711743812546d3a1f31
|
SigmoidRange
|
from torch.nn import Module
import functools
import torch
import torch.nn as nn
from typing import *
def sigmoid_range(x, low, high):
"""Sigmoid function with range `(low, high)`"""
return torch.sigmoid(x) * (high - low) + low
class PrePostInitMeta(type):
"""A metaclass that calls optional `__pre_init__` and `__post_init__` methods"""
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
def _pass(self, *args, **kwargs):
pass
for o in ('__init__', '__pre_init__', '__post_init__'):
if not hasattr(x, o):
setattr(x, o, _pass)
old_init = x.__init__
@functools.wraps(old_init)
def _init(self, *args, **kwargs):
self.__pre_init__()
old_init(self, *args, **kwargs)
self.__post_init__()
setattr(x, '__init__', _init)
return x
class Module(nn.Module, metaclass=PrePostInitMeta):
"""Same as `nn.Module`, but no need for subclasses to call `super().__init__`"""
def __pre_init__(self):
super().__init__()
def __init__(self):
pass
class SigmoidRange(Module):
"""Sigmoid module with range `(low, high)`"""
def __init__(self, low, high):
self.low, self.high = low, high
def forward(self, x):
return sigmoid_range(x, self.low, self.high)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'low': 4, 'high': 4}]
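Note that get_init_inputs() above uses low=4, high=4, so sigmoid(x) * (4 - 4) + 4 collapses to the constant 4; that is why the fused kernel below multiplies by 0.0 and adds 4.0. A non-degenerate sketch (not part of the record):
import torch
m = SigmoidRange(low=0.0, high=5.0)
y = m(torch.randn(4, 4, 4, 4))
assert (y > 0).all() and (y < 5).all()  # outputs squashed into (low, high)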
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import functools
import torch.nn as nn
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.0
tmp3 = tmp1 * tmp2
tmp4 = 4.0
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def sigmoid_range(x, low, high):
"""Sigmoid function with range `(low, high)`"""
return torch.sigmoid(x) * (high - low) + low
class PrePostInitMeta(type):
"""A metaclass that calls optional `__pre_init__` and `__post_init__` methods"""
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
def _pass(self, *args, **kwargs):
pass
for o in ('__init__', '__pre_init__', '__post_init__'):
if not hasattr(x, o):
setattr(x, o, _pass)
old_init = x.__init__
@functools.wraps(old_init)
def _init(self, *args, **kwargs):
self.__pre_init__()
old_init(self, *args, **kwargs)
self.__post_init__()
setattr(x, '__init__', _init)
return x
class Module(nn.Module, metaclass=PrePostInitMeta):
"""Same as `nn.Module`, but no need for subclasses to call `super().__init__`"""
def __pre_init__(self):
super().__init__()
def __init__(self):
pass
class SigmoidRangeNew(Module):
"""Sigmoid module with range `(low, high)`"""
def __init__(self, low, high):
self.low, self.high = low, high
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
amaarora/fastai_dev
|
SigmoidRange
| false
| 14,826
|
[
"Apache-2.0"
] | 380
|
ffea51a553e4a7f71bc7240730b370cd0d07cb0a
|
https://github.com/amaarora/fastai_dev/tree/ffea51a553e4a7f71bc7240730b370cd0d07cb0a
|
LSTMClassCriterion
|
import torch
import torch.nn as nn
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class LSTMClassCriterion(nn.Module):
def __init__(self):
super(LSTMClassCriterion, self).__init__()
def forward(self, pred, target, mask):
pred = pred.clone()
target = target.clone()
mask = mask.clone()
target = target[:, :pred.size(1)]
mask = mask[:, :pred.size(1)]
pred = to_contiguous(pred).view(-1, pred.size(2))
target = to_contiguous(target).view(-1, 1)
mask = to_contiguous(mask).view(-1, 1)
loss = -pred.gather(1, target) * mask
loss = torch.sum(loss) / torch.sum(mask)
_, idx = torch.max(pred, dim=1)
correct = idx.eq(torch.squeeze(target))
correct = correct.float() * torch.squeeze(mask)
accuracy = torch.sum(correct) / torch.sum(mask)
return loss, accuracy
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
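A sketch (not part of the record), assuming pred carries per-class log-probabilities as the negated gather suggests; the get_inputs() placeholders above are integer tensors and are not meaningful probabilities:
import torch
crit = LSTMClassCriterion()
pred = torch.log(torch.tensor([[[0.1, 0.9]]]))  # (batch=1, steps=1, classes=2)
target = torch.tensor([[1]])
mask = torch.ones(1, 1)
loss, acc = crit(pred, target, mask)
# loss = -log(0.9) ~ 0.1054; argmax picks class 1, so masked accuracy is 1.0
assert torch.isclose(loss, torch.tensor(0.1053605), atol=1e-6)
assert acc.item() == 1.0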
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_eq_gather_max_mul_neg_sum_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + r0, None)
tmp39 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tl.full([1, 1], 0, tl.int64)
tmp5 = tl.full([1, 1], 1, tl.int64)
tmp6 = tmp4 < tmp5
tmp7 = tmp3 & tmp6
tmp8 = tmp2 | tmp7
tmp9 = tl.where(tmp8, tmp0, tmp1)
tmp10 = tl.where(tmp8, tmp4, tmp5)
tmp12 = tmp9 > tmp11
tmp13 = tmp9 == tmp11
tmp14 = tl.full([1, 1], 2, tl.int64)
tmp15 = tmp10 < tmp14
tmp16 = tmp13 & tmp15
tmp17 = tmp12 | tmp16
tmp18 = tl.where(tmp17, tmp9, tmp11)
tmp19 = tl.where(tmp17, tmp10, tmp14)
tmp21 = tmp18 > tmp20
tmp22 = tmp18 == tmp20
tmp23 = tl.full([1, 1], 3, tl.int64)
tmp24 = tmp19 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tmp21 | tmp25
tl.where(tmp26, tmp18, tmp20)
tmp28 = tl.where(tmp26, tmp19, tmp23)
tmp30 = tmp28 == tmp29
tmp31 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp32 = tmp29 + tmp31
tmp33 = tmp29 < 0
tmp34 = tl.where(tmp33, tmp32, tmp29)
tl.device_assert((0 <= tmp34) & (tmp34 < 4),
'index out of bounds: 0 <= tmp34 < 4')
tmp36 = tl.load(in_ptr0 + (tmp34 + 4 * r0), None, eviction_policy=
'evict_last')
tmp37 = -tmp36
tmp38 = tmp37.to(tl.float32)
tmp40 = tmp38 * tmp39
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
tmp46 = tl.sum(tmp44, 1)[:, None]
tmp47 = tmp30.to(tl.float32)
tmp48 = tmp47 * tmp39
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = tmp51 / tmp46
tmp53 = tmp43 / tmp46
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp52, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf6 = buf3
del buf3
buf5 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_div_eq_gather_max_mul_neg_sum_0[grid(1)](buf6
, buf5, arg0_1, arg1_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf5, buf6
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class LSTMClassCriterionNew(nn.Module):
def __init__(self):
super(LSTMClassCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
aluo-x/shape2prog
|
LSTMClassCriterion
| false
| 14,827
|
[
"BSD-2-Clause"
] | 109
|
1177e5205b99bb293e353688b564c94a14211c75
|
https://github.com/aluo-x/shape2prog/tree/1177e5205b99bb293e353688b564c94a14211c75
|
Discriminator
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, num_inputs, args):
super(Discriminator, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
prob = torch.sigmoid(self.fc3(x))
return prob
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'args': _mock_config(hidden_size=4)}]
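A usage sketch (not part of the record): args only needs a .hidden_size attribute, so types.SimpleNamespace can stand in for the paritybench _mock_config helper:
import torch
from types import SimpleNamespace
d = Discriminator(num_inputs=4, args=SimpleNamespace(hidden_size=4))
prob = d(torch.rand(4, 4, 4, 4))
assert prob.shape == (4, 4, 4, 1)
assert (prob > 0).all() and (prob < 1).all()  # sigmoid keeps outputs in (0, 1)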
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_sigmoid_1[grid(64)](buf5, primals_7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, primals_6, primals_4
class DiscriminatorNew(nn.Module):
def __init__(self, num_inputs, args):
super(DiscriminatorNew, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
amy12xx/lets-do-irl
|
Discriminator
| false
| 14,828
|
[
"MIT"
] | 408
|
fd469e9fb7426e41b07c83ce4b87962ac3543b1e
|
https://github.com/amy12xx/lets-do-irl/tree/fd469e9fb7426e41b07c83ce4b87962ac3543b1e
|
MaxMarginRankingLoss
|
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class MaxMarginRankingLoss(nn.Module):
def __init__(self, margin=1.0, negative_weighting=False, batch_size=1,
n_pair=1, hard_negative_rate=0.5):
super(MaxMarginRankingLoss, self).__init__()
self.margin = margin
self.n_pair = n_pair
self.batch_size = batch_size
easy_negative_rate = 1 - hard_negative_rate
self.easy_negative_rate = easy_negative_rate
self.negative_weighting = negative_weighting
if n_pair > 1 and batch_size > 1:
alpha = easy_negative_rate / ((batch_size - 1) * (1 -
easy_negative_rate))
mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
mm_mask = torch.tensor(mm_mask) * (batch_size * (1 -
easy_negative_rate))
self.mm_mask = mm_mask.float()
def forward(self, x):
d = torch.diag(x)
max_margin = F.relu(self.margin + x - d.view(-1, 1)) + F.relu(self.
margin + x - d.view(1, -1))
if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
max_margin = max_margin * self.mm_mask
return max_margin.mean()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
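A worked example (a sketch, not part of the record): for an identity similarity matrix, every off-diagonal margin is exactly met and only the diagonal terms relu(1 + 1 - 1) = 1 survive, twice each, so the mean is 2 * 4 / 16 = 0.5:
import torch
loss = MaxMarginRankingLoss()(torch.eye(4))
assert torch.isclose(loss, torch.tensor(0.5))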
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_relu_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = tmp2 - tmp7
tmp9 = triton_helpers.maximum(tmp5, tmp8)
tmp10 = tmp6 + tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 16.0
tmp15 = tmp13 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_relu_sub_0[grid(1)](buf1, arg0_1, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class MaxMarginRankingLossNew(nn.Module):
def __init__(self, margin=1.0, negative_weighting=False, batch_size=1,
n_pair=1, hard_negative_rate=0.5):
super(MaxMarginRankingLossNew, self).__init__()
self.margin = margin
self.n_pair = n_pair
self.batch_size = batch_size
easy_negative_rate = 1 - hard_negative_rate
self.easy_negative_rate = easy_negative_rate
self.negative_weighting = negative_weighting
if n_pair > 1 and batch_size > 1:
alpha = easy_negative_rate / ((batch_size - 1) * (1 -
easy_negative_rate))
mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
mm_mask = torch.tensor(mm_mask) * (batch_size * (1 -
easy_negative_rate))
self.mm_mask = mm_mask.float()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
amirziai/CLIP4Clip
|
MaxMarginRankingLoss
| false
| 14,829
|
[
"MIT"
] | 294
|
d1f31c881ed897a513c29e62512cd56c482420e6
|
https://github.com/amirziai/CLIP4Clip/tree/d1f31c881ed897a513c29e62512cd56c482420e6
|
GaussianFilter
|
import torch
import torch.nn as nn
import torch.utils.data
class GaussianFilter(nn.Module):
def __init__(self, kernel_size=13, stride=1, padding=6):
super(GaussianFilter, self).__init__()
mean = (kernel_size - 1) / 2.0
variance = ((kernel_size - 1) / 6.0) ** 2.0
x_coord = torch.arange(kernel_size)
x_grid = x_coord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()
gaussian_kernel = torch.exp(-torch.sum((xy_grid - mean) ** 2.0, dim
=-1) / (2 * variance))
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(3, 1, 1, 1)
self.gaussian_filter = nn.Conv2d(3, 3, kernel_size, stride=stride,
padding=padding, groups=3, bias=False)
self.gaussian_filter.weight.data = gaussian_kernel
self.gaussian_filter.weight.requires_grad = False
def forward(self, x):
return self.gaussian_filter(x)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
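A sketch (not part of the record) verifying the kernel construction above: each depthwise filter is normalized to sum to 1, so the filter preserves the mean intensity of its input:
import torch
f = GaussianFilter()
w = f.gaussian_filter.weight  # (3, 1, 13, 13), one fixed kernel per RGB channel
assert w.shape == (3, 1, 13, 13)
assert torch.allclose(w.sum(dim=(2, 3)), torch.ones(3, 1), atol=1e-6)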
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp0, ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (3, 1, 13, 13), (169, 169, 13, 1))
assert_size_stride(arg1_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(12, 4096)](arg1_1, buf0, 12,
4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1),
padding=(6, 6), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf1, (4, 3, 64, 64), (12288, 1, 192, 3))
del arg0_1
buf2 = reinterpret_tensor(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1), 0
)
del buf0
triton_poi_fused_convolution_1[grid(12, 4096)](buf1, buf2, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf1
return buf2,
class GaussianFilterNew(nn.Module):
def __init__(self, kernel_size=13, stride=1, padding=6):
super(GaussianFilterNew, self).__init__()
mean = (kernel_size - 1) / 2.0
variance = ((kernel_size - 1) / 6.0) ** 2.0
x_coord = torch.arange(kernel_size)
x_grid = x_coord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()
gaussian_kernel = torch.exp(-torch.sum((xy_grid - mean) ** 2.0, dim
=-1) / (2 * variance))
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(3, 1, 1, 1)
self.gaussian_filter = nn.Conv2d(3, 3, kernel_size, stride=stride,
padding=padding, groups=3, bias=False)
self.gaussian_filter.weight.data = gaussian_kernel
self.gaussian_filter.weight.requires_grad = False
def forward(self, input_0):
arg0_1 = self.gaussian_filter.weight
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
alsgkals2/SRResCGAN
|
GaussianFilter
| false
| 14,830
|
[
"MIT"
] | 81
|
a71201a93e1819045f9c7711743812546d3a1f31
|
https://github.com/alsgkals2/SRResCGAN/tree/a71201a93e1819045f9c7711743812546d3a1f31
|
VDB
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class VDB(nn.Module):
def __init__(self, num_inputs, args):
super(VDB, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.z_size)
self.fc3 = nn.Linear(args.hidden_size, args.z_size)
self.fc4 = nn.Linear(args.z_size, args.hidden_size)
self.fc5 = nn.Linear(args.hidden_size, 1)
self.fc5.weight.data.mul_(0.1)
self.fc5.bias.data.mul_(0.0)
def encoder(self, x):
h = torch.tanh(self.fc1(x))
return self.fc2(h), self.fc3(h)
def reparameterize(self, mu, logvar):
std = torch.exp(logvar / 2)
eps = torch.randn_like(std)
return mu + std * eps
def discriminator(self, z):
h = torch.tanh(self.fc4(z))
return torch.sigmoid(self.fc5(h))
def forward(self, x):
mu, logvar = self.encoder(x)
z = self.reparameterize(mu, logvar)
prob = self.discriminator(z)
return prob, mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'args': _mock_config(hidden_size=4,
z_size=4)}]
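A sketch (not part of the record; types.SimpleNamespace stands in for the paritybench _mock_config helper). The reparameterization z = mu + exp(logvar / 2) * eps keeps sampling differentiable w.r.t. mu and logvar, with eps carrying all the randomness:
import torch
from types import SimpleNamespace
vdb = VDB(num_inputs=4, args=SimpleNamespace(hidden_size=4, z_size=4))
prob, mu, logvar = vdb(torch.rand(4, 4, 4, 4))
assert prob.shape == (4, 4, 4, 1)
assert mu.shape == logvar.shape == (4, 4, 4, 4)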
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_div_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (1, 4), (4, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_exp_mul_1[grid(256)](buf2, buf3, buf5,
buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_tanh_0[grid(256)](buf8, primals_9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 1), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_sigmoid_2[grid(64)](buf10, primals_11, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_11
return buf10, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, reinterpret_tensor(buf6, (64, 4), (4, 1), 0
), buf8, buf10, primals_10, primals_8, primals_6, primals_4
class VDBNew(nn.Module):
def __init__(self, num_inputs, args):
super(VDBNew, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.z_size)
self.fc3 = nn.Linear(args.hidden_size, args.z_size)
self.fc4 = nn.Linear(args.z_size, args.hidden_size)
self.fc5 = nn.Linear(args.hidden_size, 1)
self.fc5.weight.data.mul_(0.1)
self.fc5.bias.data.mul_(0.0)
def encoder(self, x):
h = torch.tanh(self.fc1(x))
return self.fc2(h), self.fc3(h)
def reparameterize(self, mu, logvar):
std = torch.exp(logvar / 2)
eps = torch.randn_like(std)
return mu + std * eps
def discriminator(self, z):
h = torch.tanh(self.fc4(z))
return torch.sigmoid(self.fc5(h))
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2]
|
amy12xx/lets-do-irl
|
VDB
| false
| 14,831
|
[
"MIT"
] | 408
|
fd469e9fb7426e41b07c83ce4b87962ac3543b1e
|
https://github.com/amy12xx/lets-do-irl/tree/fd469e9fb7426e41b07c83ce4b87962ac3543b1e
|
Critic
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Critic(nn.Module):
def __init__(self, num_inputs, args):
super(Critic, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
v = self.fc3(x)
return v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'args': _mock_config(hidden_size=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class CriticNew(nn.Module):
def __init__(self, num_inputs, args):
super(CriticNew, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
amy12xx/lets-do-irl
|
Critic
| false
| 14,832
|
[
"MIT"
] | 408
|
fd469e9fb7426e41b07c83ce4b87962ac3543b1e
|
https://github.com/amy12xx/lets-do-irl/tree/fd469e9fb7426e41b07c83ce4b87962ac3543b1e
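A minimal usage sketch for the Critic record above (not from the source repo; SimpleNamespace stands in for the harness's _mock_config, and the Critic class above is assumed to be in scope):
import torch
from types import SimpleNamespace
args = SimpleNamespace(hidden_size=4)
critic = Critic(num_inputs=4, args=args)
values = critic(torch.rand(4, 4, 4, 4))  # nn.Linear acts on the last dim
print(values.shape)  # torch.Size([4, 4, 4, 1])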
|
BasicBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.conv1(x))
out = self.conv2(out)
out += self.shortcut(x)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf3,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1, buf4
class BasicBlockNew(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
amyami187/nngeometry
|
BasicBlock
| false
| 14,833
|
[
"MIT"
] | 103
|
cb516da3f7a019e148f48ff3ef3bed0cdae0d184
|
https://github.com/amyami187/nngeometry/tree/cb516da3f7a019e148f48ff3ef3bed0cdae0d184
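A hypothetical parity check between the eager BasicBlock and its Triton-compiled twin (assumes both classes above are in scope and a CUDA device is available):
import torch
ref = BasicBlock(4, 4).cuda()
new = BasicBlockNew(4, 4).cuda()
new.load_state_dict(ref.state_dict())  # same submodules, so state dicts match
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(ref(x), new(x), atol=1e-5)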
|
DeResNetBlockGroupNorm
|
import torch
import torch.nn as nn
def deconv3x3(in_planes, out_planes, stride=1, output_padding=0):
"""3x3 deconvolution with padding"""
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride=
stride, padding=1, output_padding=output_padding, bias=False)
class DeResNetBlockGroupNorm(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1,
output_padding=0, activation='relu'):
super(DeResNetBlockGroupNorm, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.deconv2 = deconv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.ConvTranspose2d(inplanes, planes,
kernel_size=1, stride=stride, output_padding=output_padding,
bias=False), nn.GroupNorm(num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, x):
residual = x
out = self.deconv1(x)
out = self.gn1(out)
out = self.activation(out)
out = self.deconv2(out)
out = self.gn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4, 'num_groups': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr3 + (r1 + 64 * x0), tmp33, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3,
primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid
(4)](buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12,
buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6,
buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0),
reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6,
reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor(
buf10, (4, 1), (1, 1), 0), buf12)
def deconv3x3(in_planes, out_planes, stride=1, output_padding=0):
"""3x3 deconvolution with padding"""
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride=
stride, padding=1, output_padding=output_padding, bias=False)
class DeResNetBlockGroupNormNew(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1,
output_padding=0, activation='relu'):
super(DeResNetBlockGroupNormNew, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.deconv2 = deconv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.ConvTranspose2d(inplanes, planes,
kernel_size=1, stride=stride, output_padding=output_padding,
bias=False), nn.GroupNorm(num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, input_0):
primals_2 = self.deconv1.weight
primals_3 = self.gn1.weight
primals_4 = self.gn1.bias
primals_5 = self.deconv2.weight
primals_6 = self.gn2.weight
primals_7 = self.gn2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
andrecianflone/wolf
|
DeResNetBlockGroupNorm
| false
| 14,834
|
[
"Apache-2.0"
] | 75
|
826bbedc58d4d29871110349356868066a3108e6
|
https://github.com/andrecianflone/wolf/tree/826bbedc58d4d29871110349356868066a3108e6
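A shape sketch for the deconv3x3 helper above; with stride=2 and output_padding=1 the spatial size doubles, since H_out = (H_in - 1)*stride - 2*padding + kernel + output_padding:
import torch
layer = deconv3x3(4, 8, stride=2, output_padding=1)  # assumes the helper above
y = layer(torch.rand(1, 4, 16, 16))
print(y.shape)  # torch.Size([1, 8, 32, 32]); (16-1)*2 - 2 + 3 + 1 = 32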
|
PairwiseBilinear
|
import math
import torch
import torch.nn as nn
class PairwiseBilinear(nn.Module):
"""
https://github.com/stanfordnlp/stanza/blob/v1.1.1/stanza/models/common/biaffine.py#L5 # noqa
"""
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int', bias: 'bool'=True):
super().__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(in1_features, out_features,
in2_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
bound = 1 / math.sqrt(self.weight.size(0))
nn.init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input1: 'torch.Tensor', input2: 'torch.Tensor'
) ->torch.Tensor:
d1, d2, out = self.in1_features, self.in2_features, self.out_features
n1, n2 = input1.size(1), input2.size(1)
x1W = torch.mm(input1.view(-1, d1), self.weight.view(d1, out * d2))
x1Wx2 = x1W.view(-1, n1 * out, d2).bmm(input2.transpose(1, 2))
y = x1Wx2.view(-1, n1, self.out_features, n2).transpose(2, 3)
if self.bias is not None:
y.add_(self.bias)
return y
def extra_repr(self) ->str:
return ('in1_features={}, in2_features={}, out_features={}, bias={}'
.format(self.in1_features, self.in2_features, self.out_features,
self.bias is not None))
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in1_features': 4, 'in2_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_transpose_0(in_ptr0, in_ptr1, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (16, 1), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1),
0), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0),
out=buf1)
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 1, 4), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_transpose_0[grid(64, 4)](buf1, primals_4, buf3,
64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del buf1
del primals_4
return buf3, primals_2, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0)
class PairwiseBilinearNew(nn.Module):
"""
https://github.com/stanfordnlp/stanza/blob/v1.1.1/stanza/models/common/biaffine.py#L5 # noqa
"""
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int', bias: 'bool'=True):
super().__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(in1_features, out_features,
in2_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
bound = 1 / math.sqrt(self.weight.size(0))
nn.init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
nn.init.uniform_(self.bias, -bound, bound)
def extra_repr(self) ->str:
return ('in1_features={}, in2_features={}, out_features={}, bias={}'
.format(self.in1_features, self.in2_features, self.out_features,
self.bias is not None))
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_4 = self.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
andhikayusup/biaffineparser
|
PairwiseBilinear
| false
| 14,835
|
[
"Apache-2.0"
] | 46
|
30180b805bdb6c0f1e0386ceb090ba83d6ab2621
|
https://github.com/andhikayusup/biaffineparser/tree/30180b805bdb6c0f1e0386ceb090ba83d6ab2621
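A sanity sketch (not from the source repo): the forward above is equivalent to a single einsum over (batch, n1, d1) x (d1, out, d2) x (batch, n2, d2), plus the bias on the last axis:
import torch
m = PairwiseBilinear(4, 4, 4)  # assumes the class above is in scope
x1, x2 = torch.rand(2, 3, 4), torch.rand(2, 5, 4)
ref = torch.einsum('bid,doe,bje->bijo', x1, m.weight, x2) + m.bias
assert torch.allclose(m(x1, x2), ref, atol=1e-5)  # output shape (2, 3, 5, 4)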
|
CrossEmbeddings
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
class CrossEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(CrossEmbeddings, self).__init__()
self.position_embeddings = nn.Embedding(config.
max_position_embeddings, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, concat_embeddings, concat_type=None):
_batch_size, seq_length = concat_embeddings.size(0
), concat_embeddings.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=
concat_embeddings.device)
position_ids = position_ids.unsqueeze(0).expand(concat_embeddings.
size(0), -1)
position_embeddings = self.position_embeddings(position_ids)
embeddings = concat_embeddings + position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(max_position_embeddings=4,
hidden_size=4, hidden_dropout_prob=0.5)}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr2 + (x0 + 4 * tmp5), xmask)
tmp8 = tmp0 + tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_embedding_1[grid(256)](primals_1, buf0,
primals_2, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf1, reinterpret_tensor(buf0, (1, 4), (4, 1), 0)
class CrossEmbeddingsNew(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(CrossEmbeddingsNew, self).__init__()
self.position_embeddings = nn.Embedding(config.
max_position_embeddings, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0):
primals_2 = self.position_embeddings.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
amirziai/CLIP4Clip
|
CrossEmbeddings
| false
| 14,836
|
[
"MIT"
] | 294
|
d1f31c881ed897a513c29e62512cd56c482420e6
|
https://github.com/amirziai/CLIP4Clip/tree/d1f31c881ed897a513c29e62512cd56c482420e6
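A minimal usage sketch (SimpleNamespace stands in for the harness's _mock_config; eval() disables dropout so the check is deterministic):
import torch
from types import SimpleNamespace
cfg = SimpleNamespace(max_position_embeddings=8, hidden_size=4,
    hidden_dropout_prob=0.1)
emb = CrossEmbeddings(cfg).eval()  # assumes the class above is in scope
out = emb(torch.rand(2, 8, 4))  # position ids 0..7, broadcast over the batch
print(out.shape)  # torch.Size([2, 8, 4])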
|
AdaIN2d
|
import torch
import torch.nn as nn
class AdaIN2d(nn.Module):
def __init__(self, in_channels, in_features):
super(AdaIN2d, self).__init__()
self.norm = nn.InstanceNorm2d(in_channels, affine=False,
track_running_stats=False)
self.net = nn.Linear(in_features, 2 * in_channels)
self.reset_parameters()
def forward(self, x, h):
h = self.net(h)
bs, fs = h.size()
        h.view(bs, fs, 1, 1)  # note: result is discarded, h stays 2-D
b, s = h.chunk(2, 1)
x = self.norm(x)
return x * (s + 1) + b
def reset_parameters(self):
nn.init.constant_(self.net.weight, 0.0)
nn.init.constant_(self.net.bias, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'in_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r2 = rindex % 4
r3 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (4 + r2 + 8 * r3), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (4 + r2), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (r2 + 8 * r3), None, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp26 = tmp24 + tmp25
tmp27 = 1.0
tmp28 = tmp26 + tmp27
tmp29 = tmp23 * tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp29 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp33, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaIN2dNew(nn.Module):
def __init__(self, in_channels, in_features):
super(AdaIN2dNew, self).__init__()
self.norm = nn.InstanceNorm2d(in_channels, affine=False,
track_running_stats=False)
self.net = nn.Linear(in_features, 2 * in_channels)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.net.weight, 0.0)
nn.init.constant_(self.net.bias, 0.0)
def forward(self, input_0, input_1):
primals_1 = self.net.weight
primals_2 = self.net.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
andrecianflone/wolf
|
AdaIN2d
| false
| 14,837
|
[
"Apache-2.0"
] | 75
|
826bbedc58d4d29871110349356868066a3108e6
|
https://github.com/andrecianflone/wolf/tree/826bbedc58d4d29871110349356868066a3108e6
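A behavior sketch for AdaIN2d above. Note that h.view(bs, fs, 1, 1) in forward is computed but never assigned, so the scale/shift stay 2-D and broadcast over the last two (spatial) dims; the fused Triton kernel reproduces exactly that indexing. Because net is zero-initialized, the module starts out as plain instance norm:
import torch
ada = AdaIN2d(in_channels=4, in_features=4)  # assumes the class above
x, h = torch.rand(4, 4, 4, 4), torch.rand(4, 4)
y = ada(x, h)
assert torch.allclose(y, ada.norm(x))  # zero-init net => scale 1, shift 0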
|
Biaffine
|
import math
import torch
import torch.nn as nn
class PairwiseBilinear(nn.Module):
"""
https://github.com/stanfordnlp/stanza/blob/v1.1.1/stanza/models/common/biaffine.py#L5 # noqa
"""
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int', bias: 'bool'=True):
super().__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(in1_features, out_features,
in2_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
bound = 1 / math.sqrt(self.weight.size(0))
nn.init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input1: 'torch.Tensor', input2: 'torch.Tensor'
) ->torch.Tensor:
d1, d2, out = self.in1_features, self.in2_features, self.out_features
n1, n2 = input1.size(1), input2.size(1)
x1W = torch.mm(input1.view(-1, d1), self.weight.view(d1, out * d2))
x1Wx2 = x1W.view(-1, n1 * out, d2).bmm(input2.transpose(1, 2))
y = x1Wx2.view(-1, n1, self.out_features, n2).transpose(2, 3)
if self.bias is not None:
y.add_(self.bias)
return y
def extra_repr(self) ->str:
return ('in1_features={}, in2_features={}, out_features={}, bias={}'
.format(self.in1_features, self.in2_features, self.out_features,
self.bias is not None))
class Biaffine(nn.Module):
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int'):
super().__init__()
self.bilinear = PairwiseBilinear(in1_features + 1, in2_features + 1,
out_features)
self.bilinear.weight.data.zero_()
self.bilinear.bias.data.zero_()
def forward(self, input1: 'torch.Tensor', input2: 'torch.Tensor'
) ->torch.Tensor:
input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)
], dim=input1.dim() - 1)
input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)
], dim=input2.dim() - 1)
return self.bilinear(input1, input2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in1_features': 4, 'in2_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_transpose_1(in_ptr0, in_ptr1, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (5, 4, 5), (20, 5, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_view_0[grid(80)](primals_1, buf0, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 20), (20, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (5, 20), (20,
1), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_view_0[grid(80)](primals_2, buf2, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 5), (80, 5, 1),
0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 1, 4), torch.float32)
triton_poi_fused_add_transpose_1[grid(64, 4)](buf3, primals_4, buf5,
64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del buf3
del primals_4
return buf5, buf2, reinterpret_tensor(buf0, (5, 16), (1, 5), 0)
class PairwiseBilinear(nn.Module):
"""
https://github.com/stanfordnlp/stanza/blob/v1.1.1/stanza/models/common/biaffine.py#L5 # noqa
"""
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int', bias: 'bool'=True):
super().__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(in1_features, out_features,
in2_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
bound = 1 / math.sqrt(self.weight.size(0))
nn.init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input1: 'torch.Tensor', input2: 'torch.Tensor'
) ->torch.Tensor:
d1, d2, out = self.in1_features, self.in2_features, self.out_features
n1, n2 = input1.size(1), input2.size(1)
x1W = torch.mm(input1.view(-1, d1), self.weight.view(d1, out * d2))
x1Wx2 = x1W.view(-1, n1 * out, d2).bmm(input2.transpose(1, 2))
y = x1Wx2.view(-1, n1, self.out_features, n2).transpose(2, 3)
if self.bias is not None:
y.add_(self.bias)
return y
def extra_repr(self) ->str:
return ('in1_features={}, in2_features={}, out_features={}, bias={}'
.format(self.in1_features, self.in2_features, self.out_features,
self.bias is not None))
class BiaffineNew(nn.Module):
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int'):
super().__init__()
self.bilinear = PairwiseBilinear(in1_features + 1, in2_features + 1,
out_features)
self.bilinear.weight.data.zero_()
self.bilinear.bias.data.zero_()
def forward(self, input_0, input_1):
primals_3 = self.bilinear.weight
primals_4 = self.bilinear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
andhikayusup/biaffineparser
|
Biaffine
| false
| 14,838
|
[
"Apache-2.0"
] | 46
|
30180b805bdb6c0f1e0386ceb090ba83d6ab2621
|
https://github.com/andhikayusup/biaffineparser/tree/30180b805bdb6c0f1e0386ceb090ba83d6ab2621
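A shape sketch for Biaffine above: a ones column is appended to each input, so the bilinear weight is (in1+1, out, in2+1); with the zero initialization in __init__ the scores start at exactly zero:
import torch
biaff = Biaffine(4, 4, 4)  # assumes the classes above are in scope
s = biaff(torch.rand(2, 3, 4), torch.rand(2, 5, 4))
print(s.shape)  # torch.Size([2, 3, 5, 4])
assert s.abs().max() == 0  # weight and bias were zeroed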
|
DeepMind
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DeepMind(nn.Module):
def __init__(self):
super(DeepMind, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
self.fc1 = nn.Linear(32 * 7 * 7, 512)
nn.init.orthogonal_(self.conv1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv2.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv3.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.fc1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.constant_(self.conv1.bias.data, 0)
nn.init.constant_(self.conv2.bias.data, 0)
nn.init.constant_(self.conv3.bias.data, 0)
nn.init.constant_(self.fc1.bias.data, 0)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 32 * 7 * 7)
x = F.relu(self.fc1(x))
return x
def get_inputs():
return [torch.rand([4, 4, 144, 144])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 156800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1225 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 196 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (512, 1568), (1568, 1))
assert_size_stride(primals_9, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2,
156800, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 14, 14), (6272, 196, 14, 1))
buf5 = buf4
del buf4
buf9 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(25088)](
buf5, primals_7, buf9, 25088, XBLOCK=128, num_warps=4, num_stages=1
)
del primals_7
buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0
), reinterpret_tensor(primals_8, (1568, 512), (1, 1568), 0),
out=buf6)
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((16, 512), (512, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(8192)](buf7,
primals_9, buf8, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf3,
reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0), buf8, primals_8,
buf9)
class DeepMindNew(nn.Module):
def __init__(self):
super(DeepMindNew, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
self.fc1 = nn.Linear(32 * 7 * 7, 512)
nn.init.orthogonal_(self.conv1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv2.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv3.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.fc1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.constant_(self.conv1.bias.data, 0)
nn.init.constant_(self.conv2.bias.data, 0)
nn.init.constant_(self.conv3.bias.data, 0)
nn.init.constant_(self.fc1.bias.data, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
TianhongDai/Self_Imitation_Learning
|
DeepMind
| false
| 14,839
|
[
"MIT"
] | 61
|
e49003582fa3d875495d84682f2a3332d4922dbc
|
https://github.com/TianhongDai/Self_Imitation_Learning/tree/e49003582fa3d875495d84682f2a3332d4922dbc
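A shape sketch: this is the classic DQN trunk sized for 84x84 inputs (84 -> 20 -> 9 -> 7, hence the 32*7*7 in fc1). With the 144x144 test input above, conv3 yields (4, 32, 14, 14) and view(-1, 1568) silently folds the extra spatial area into the batch dimension, which is why the compiled call returns a (16, 512) buffer:
import torch
net = DeepMind()  # assumes the class above is in scope
print(net(torch.rand(4, 4, 84, 84)).shape)    # torch.Size([4, 512])
print(net(torch.rand(4, 4, 144, 144)).shape)  # torch.Size([16, 512])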
|
Actor
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Actor(nn.Module):
def __init__(self, num_inputs, num_outputs, args):
super(Actor, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, num_outputs)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
mu = self.fc3(x)
logstd = torch.zeros_like(mu)
std = torch.exp(logstd)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_outputs': 4, 'args': _mock_config(
hidden_size=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_exp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_exp_1[grid(256)](buf5, 256, XBLOCK=256, num_warps=
4, num_stages=1)
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class ActorNew(nn.Module):
def __init__(self, num_inputs, num_outputs, args):
super(ActorNew, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, num_outputs)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
amy12xx/lets-do-irl
|
Actor
| false
| 14,840
|
[
"MIT"
] | 408
|
fd469e9fb7426e41b07c83ce4b87962ac3543b1e
|
https://github.com/amy12xx/lets-do-irl/tree/fd469e9fb7426e41b07c83ce4b87962ac3543b1e
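A minimal usage sketch (SimpleNamespace stands in for _mock_config); since logstd is a tensor of zeros, std is exp(0) == 1 everywhere, which is also why the compiled version writes a constant-one buffer:
import torch
from types import SimpleNamespace
actor = Actor(num_inputs=4, num_outputs=4, args=SimpleNamespace(hidden_size=4))
mu, std = actor(torch.rand(4, 4))  # assumes the Actor class above is in scope
print(mu.shape, std.unique())  # torch.Size([4, 4]) tensor([1.])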
|
MAELoss
|
import torch
import torch.nn as nn
class MAELoss(nn.Module):
def __init__(self):
super(MAELoss, self).__init__()
def forward(self, outputs, target, *args):
val_pixels = torch.ne(target, 0).float()
loss = target * val_pixels - outputs * val_pixels
return torch.sum(torch.abs(loss)) / torch.sum(val_pixels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_abs_div_mul_ne_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp4 - tmp6
tmp8 = tl_math.abs(tmp7)
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.broadcast_to(tmp3, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp11 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_abs_div_mul_ne_sub_sum_0[grid(1)](buf2,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class MAELossNew(nn.Module):
def __init__(self):
super(MAELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anglixjtu/MSG_CHN_WACV20
|
MAELoss
| false
| 14,841
|
[
"Apache-2.0"
] | 61
|
6910894cf3caed2ffde27586f96b132b0c1d1a98
|
https://github.com/anglixjtu/MSG_CHN_WACV20/tree/6910894cf3caed2ffde27586f96b132b0c1d1a98
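A worked sketch of the masking in MAELoss above: the absolute error is averaged only over pixels where target != 0 (zero pixels are treated as missing):
import torch
loss_fn = MAELoss()  # assumes the class above is in scope
target = torch.tensor([[1.0, 0.0], [2.0, 0.0]])
outputs = torch.tensor([[0.5, 9.0], [1.0, 9.0]])
print(loss_fn(outputs, target))  # (|1-0.5| + |2-1|) / 2 = tensor(0.7500)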
|
LinearConvNet
|
import torch
import torch.nn as nn
class LinearConvNet(nn.Module):
def __init__(self):
super(LinearConvNet, self).__init__()
self.conv1 = nn.Conv2d(1, 5, 3, 1)
self.conv2 = nn.Conv2d(1, 3, 2, 1, bias=False)
def forward(self, x):
conv1_out = self.conv1(x)
conv2_out = self.conv2(x)
output = torch.stack([conv1_out.sum(dim=(1, 2, 3)), conv2_out.sum(
dim=(1, 2, 3))], dim=1)
return output
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_convolution_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 12
rnumel = 6407
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 3
x1 = xindex // 3
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = r2 + 6407 * x0
tmp1 = tl.full([1, 1], 19220, tl.int32)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (19220 * x1 + (r2 + 6407 * x0) % 19220),
rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr1 + (r2 + 6407 * x0) // 3844 % 5, rmask & tmp2 &
xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.full(tmp5.shape, 0, tmp5.dtype)
tmp7 = tl.where(tmp2, tmp5, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused_convolution_stack_sum_1(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 3
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 3 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr1 + 2 * x0, tmp4, xmask)
@triton.jit
def triton_red_fused_sum_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 8
rnumel = 5954
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 2
x1 = xindex // 2
_tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = r2 + 5954 * x0
tmp1 = tl.full([1, 1], 11907, tl.int32)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (11907 * x1 + (r2 + 5954 * x0) % 11907),
rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = _tmp5 + tmp4
_tmp5 = tl.where(rmask & xmask, tmp6, _tmp5)
tmp5 = tl.sum(_tmp5, 1)[:, None]
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_per_fused_stack_sum_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 2 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr1 + 2 * x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (5, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (3, 1, 2, 2), (4, 4, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 5, 62, 62), (19220, 3844, 62, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 3, 63, 63), (11907, 3969, 63, 1))
buf2 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
get_raw_stream(0)
triton_red_fused_convolution_sum_0[grid(12)](buf0, primals_2, buf2,
12, 6407, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del buf0
del primals_2
buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
buf6 = reinterpret_tensor(buf8, (4, 1), (2, 1), 0)
triton_per_fused_convolution_stack_sum_1[grid(4)](buf2, buf6, 4, 3,
XBLOCK=1, num_warps=2, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_red_fused_sum_2[grid(8)](buf1, buf4, 8, 5954, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del buf1
buf7 = reinterpret_tensor(buf8, (4, 1), (2, 1), 1)
triton_per_fused_stack_sum_3[grid(4)](buf4, buf7, 4, 2, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
return buf8, primals_1, primals_3, primals_4
class LinearConvNetNew(nn.Module):
def __init__(self):
super(LinearConvNetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 5, 3, 1)
self.conv2 = nn.Conv2d(1, 3, 2, 1, bias=False)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
amyami187/nngeometry
|
LinearConvNet
| false
| 14,842
|
[
"MIT"
] | 103
|
cb516da3f7a019e148f48ff3ef3bed0cdae0d184
|
https://github.com/amyami187/nngeometry/tree/cb516da3f7a019e148f48ff3ef3bed0cdae0d184
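A shape sketch: each conv branch is summed over (C, H, W) to one scalar per sample, and the two scalars are stacked per sample:
import torch
net = LinearConvNet()  # assumes the class above is in scope
out = net(torch.rand(4, 1, 64, 64))
print(out.shape)  # torch.Size([4, 2])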
|
NICEMLPBlock
|
import torch
import torch.nn as nn
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
    def extra_repr(self):
        # These attributes live on the wrapped nn.Linear, not on this module.
        return 'in_features={}, out_features={}, bias={}'.format(self.
            linear.in_features, self.linear.out_features, self.linear.
            bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class NICEMLPBlock(nn.Module):
def __init__(self, in_features, out_features, hidden_features, activation):
super(NICEMLPBlock, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True)
self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, x):
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3(out)
return out
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3.init(out, init_scale=0.0 * init_scale)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'hidden_features': 4,
'activation': 'relu'}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
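    # Fused bias add + ReLU; also stores the (activation <= 0) mask that the
    # ReLU backward pass reuses.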
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
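    # Layout copy: flattens the (4, 4, 4, 4) activation into a contiguous
    # (64, 4) buffer for the next matmul.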
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
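    # Weight norm, step 1: per-output-row L2 norm of weight_v (4 elements
    # per row here).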
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
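    # Weight norm, step 2: scale each row of weight_v by weight_g / ||v|| to
    # form the effective weight matrix.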
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_2[grid(4)](primals_7, buf4,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_3[grid(16)](primals_7,
primals_6, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6,
primals_5, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0)
del buf6
extern_kernels.addmm(primals_8, buf7, reinterpret_tensor(buf5, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_8
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, primals_6, primals_7, reinterpret_tensor(primals_3, (64, 4
), (4, 1), 0), buf2, buf4, buf7, buf5, buf9, primals_4, buf10
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
    def extra_repr(self):
        # These attributes live on the wrapped nn.Linear, not on this module.
        return 'in_features={}, out_features={}, bias={}'.format(self.
            linear.in_features, self.linear.out_features, self.linear.
            bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class NICEMLPBlockNew(nn.Module):
def __init__(self, in_features, out_features, hidden_features, activation):
super(NICEMLPBlockNew, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True)
self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.constant_(self.fc2.bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3.init(out, init_scale=0.0 * init_scale)
return out
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_8 = self.fc3.linear.bias
primals_6 = self.fc3.linear.weight_g
primals_7 = self.fc3.linear.weight_v
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
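# Minimal usage sketch (not part of the original dump): exercises the Triton
# path with the shapes from get_inputs()/get_init_inputs() above. Assumes a
# CUDA device, since call() pins all work to device 0.
if __name__ == '__main__':
    model = NICEMLPBlockNew(in_features=4, out_features=4,
        hidden_features=4, activation='relu').cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    print(model(x).shape)  # expected: torch.Size([4, 4, 4, 4])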
|
andrecianflone/wolf
|
NICEMLPBlock
| false
| 14,843
|
[
"Apache-2.0"
] | 75
|
826bbedc58d4d29871110349356868066a3108e6
|
https://github.com/andrecianflone/wolf/tree/826bbedc58d4d29871110349356868066a3108e6
|
TransformerEncoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def _get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class DotProductAttention(nn.Module):
def __init__(self, dropout=0.0):
super(DotProductAttention, self).__init__()
self.dropout = dropout
def forward(self, q, k, v, attn_mask=None):
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_output_weights += attn_mask
attn_output_weights = F.softmax(attn_output_weights, dim=-1)
attn_output_weights = F.dropout(attn_output_weights, p=self.dropout,
training=self.training)
attn_output = torch.bmm(attn_output_weights, v)
return attn_output
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim=
None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim ==
embed_dim)
if self._qkv_same_embed_dim:
self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim,
embed_dim))
else:
            raise RuntimeError(
                'q, k, v with different dimensions are not supported')
if bias:
self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim)
if self._qkv_same_embed_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
self.dotproductattention = DotProductAttention(dropout)
def forward(self, q, k, v, attn_mask=None, key_padding_mask=None):
tsz, bsz, embed_dim = q.shape[0], q.shape[1], q.shape[2]
head_dim = embed_dim // self.num_heads
assert head_dim * self.num_heads == embed_dim, 'embed_dim must be divisible by num_heads'
scaling = float(head_dim) ** -0.5
_b = self.in_proj_bias
_start = None
_end = embed_dim
_w = self.in_proj_weight[:_end, :]
if _b is not None:
_b = _b[:_end]
q = F.linear(q, _w, _b)
_b = self.in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = self.in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(k, _w, _b)
_b = self.in_proj_bias
_start = embed_dim * 2
_end = None
_w = self.in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(v, _w, _b)
q = q * scaling
q = q.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0).repeat(bsz, 1, 1)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
attn_mask = attn_mask.reshape(-1, *attn_mask.shape[2:])
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, tsz, 1)
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, self
.num_heads, 1, 1)
key_padding_mask = key_padding_mask.reshape(-1, *
key_padding_mask.shape[2:])
if attn_mask is not None and key_padding_mask is not None:
mask = attn_mask + key_padding_mask
elif attn_mask is not None:
mask = attn_mask
elif key_padding_mask is not None:
mask = key_padding_mask
else:
mask = None
attn_output = self.dotproductattention(q, k, v, mask)
attn_output = attn_output.transpose(0, 1).contiguous().view(tsz,
bsz, self.embed_dim)
return self.out_proj(attn_output), None
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
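    # Adds the q-projection bias and applies the 1/sqrt(head_dim) attention
    # scaling (head_dim == 1 here, so the factor is 1.0).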
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
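    # Softmax, pass 1: subtract the per-row max for numerical stability,
    # then exponentiate.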
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
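    # Softmax, pass 2: divide each exponentiated entry by its row sum.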
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
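    # Transpose copy: rearranges the attention output into the contiguous
    # layout expected by the output projection.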
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
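    # Residual add fused with LayerNorm statistics: per-row mean and biased
    # variance of (src + attention output).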
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
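    # Same bias + ReLU (+ backward mask) pattern, applied to the 2048-wide
    # feed-forward hidden activation.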
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
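    # LayerNorm statistics: per-row mean and rsqrt(variance + eps), consumed
    # by the application kernel below.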
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
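    # LayerNorm application: normalize with the precomputed statistics, then
    # scale and shift by the affine weight and bias.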
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (2048, 4), (4, 1))
assert_size_stride(primals_9, (2048,), (1,))
assert_size_stride(primals_10, (4, 2048), (2048, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 4),
reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 8),
reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1,
16, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(64)](primals_1, buf9,
buf10, buf11, primals_6, primals_7, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 2048), (1, 4), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 2048), (8192, 2048, 1), 0)
del buf13
buf20 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(32768)](buf14,
primals_9, buf20, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0),
out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
del buf15
triton_poi_fused_add_7[grid(64)](buf16, buf12, primals_11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf17 = buf11
del buf11
buf18 = buf10
del buf10
triton_poi_fused_native_layer_norm_8[grid(16)](buf16, buf17, buf18,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf16, buf17, buf18,
primals_12, primals_13, buf19, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf17
del buf18
del primals_13
return buf19, primals_1, primals_6, primals_12, buf6, reinterpret_tensor(
buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0
), buf16, primals_10, buf20, primals_8, primals_4, reinterpret_tensor(
buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1,
4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0)
def _get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class DotProductAttention(nn.Module):
def __init__(self, dropout=0.0):
super(DotProductAttention, self).__init__()
self.dropout = dropout
def forward(self, q, k, v, attn_mask=None):
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_output_weights += attn_mask
attn_output_weights = F.softmax(attn_output_weights, dim=-1)
attn_output_weights = F.dropout(attn_output_weights, p=self.dropout,
training=self.training)
attn_output = torch.bmm(attn_output_weights, v)
return attn_output
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim=
None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim ==
embed_dim)
if self._qkv_same_embed_dim:
self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim,
embed_dim))
else:
            raise RuntimeError(
                'q, k, v with different dimensions are not supported')
if bias:
self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim)
if self._qkv_same_embed_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
self.dotproductattention = DotProductAttention(dropout)
def forward(self, q, k, v, attn_mask=None, key_padding_mask=None):
tsz, bsz, embed_dim = q.shape[0], q.shape[1], q.shape[2]
head_dim = embed_dim // self.num_heads
assert head_dim * self.num_heads == embed_dim, 'embed_dim must be divisible by num_heads'
scaling = float(head_dim) ** -0.5
_b = self.in_proj_bias
_start = None
_end = embed_dim
_w = self.in_proj_weight[:_end, :]
if _b is not None:
_b = _b[:_end]
q = F.linear(q, _w, _b)
_b = self.in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = self.in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(k, _w, _b)
_b = self.in_proj_bias
_start = embed_dim * 2
_end = None
_w = self.in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(v, _w, _b)
q = q * scaling
q = q.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0).repeat(bsz, 1, 1)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
attn_mask = attn_mask.reshape(-1, *attn_mask.shape[2:])
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, tsz, 1)
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, self
.num_heads, 1, 1)
key_padding_mask = key_padding_mask.reshape(-1, *
key_padding_mask.shape[2:])
if attn_mask is not None and key_padding_mask is not None:
mask = attn_mask + key_padding_mask
elif attn_mask is not None:
mask = attn_mask
elif key_padding_mask is not None:
mask = key_padding_mask
else:
mask = None
attn_output = self.dotproductattention(q, k, v, mask)
attn_output = attn_output.transpose(0, 1).contiguous().view(tsz,
bsz, self.embed_dim)
return self.out_proj(attn_output), None
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerEncoderLayerNew, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayerNew, self).__setstate__(state)
def forward(self, input_0):
primals_3 = self.self_attn.in_proj_weight
primals_2 = self.self_attn.in_proj_bias
primals_4 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_8 = self.linear1.weight
primals_9 = self.linear1.bias
primals_10 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.norm1.weight
primals_11 = self.norm1.bias
primals_12 = self.norm2.weight
primals_13 = self.norm2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
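# Minimal usage sketch (not part of the original dump): d_model=4, nhead=4 as
# in get_init_inputs() above; requires CUDA, since call() runs on device 0.
if __name__ == '__main__':
    layer = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda()
    src = torch.rand([4, 4, 4], device='cuda')
    print(layer(src).shape)  # expected: torch.Size([4, 4, 4])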
|
amazon-research/long-short-term-transformer
|
TransformerEncoderLayer
| false
| 14,844
|
[
"Apache-2.0"
] | 52
|
a425be4b52ab68fddd85c91d26571e4cdfe8379a
|
https://github.com/amazon-research/long-short-term-transformer/tree/a425be4b52ab68fddd85c91d26571e4cdfe8379a
|
SetConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SetConv(nn.Module):
def __init__(self, sample_feats, predicate_feats, join_feats, hid_units):
super(SetConv, self).__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 3, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, samples, predicates, joins, sample_mask,
predicate_mask, join_mask):
hid_sample = F.relu(self.sample_mlp1(samples))
hid_sample = F.relu(self.sample_mlp2(hid_sample))
hid_sample = hid_sample * sample_mask
hid_sample = torch.sum(hid_sample, dim=1, keepdim=False)
sample_norm = sample_mask.sum(1, keepdim=False)
hid_sample = hid_sample / sample_norm
hid_predicate = F.relu(self.predicate_mlp1(predicates))
hid_predicate = F.relu(self.predicate_mlp2(hid_predicate))
hid_predicate = hid_predicate * predicate_mask
hid_predicate = torch.sum(hid_predicate, dim=1, keepdim=False)
predicate_norm = predicate_mask.sum(1, keepdim=False)
hid_predicate = hid_predicate / predicate_norm
hid_join = F.relu(self.join_mlp1(joins))
hid_join = F.relu(self.join_mlp2(hid_join))
hid_join = hid_join * join_mask
hid_join = torch.sum(hid_join, dim=1, keepdim=False)
join_norm = join_mask.sum(1, keepdim=False)
hid_join = hid_join / join_norm
hid = torch.cat((hid_sample, hid_predicate, hid_join), 1)
hid = F.relu(self.out_mlp1(hid))
out = torch.sigmoid(self.out_mlp2(hid))
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4])]
def get_init_inputs():
return [[], {'sample_feats': 4, 'predicate_feats': 4, 'join_feats': 4,
'hid_units': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
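    # Fused ReLU(x + bias) * mask, summed over the 4 set elements in one pass
    # (the masked sum-pooling step of each per-set MLP branch).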
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp22 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tmp18 + tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
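    # Builds the concatenated (sample | predicate | join) feature vector,
    # dividing each pooled sum by its mask count on the fly.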
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (16 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (4 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr1 + (8 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr1 + (12 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 8, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + (16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (4 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr3 + (8 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr3 + (12 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp20 / tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp19, tmp28, tmp29)
tmp31 = tmp0 >= tmp17
tl.full([1], 12, tl.int64)
tmp34 = tl.load(in_ptr4 + (4 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr5 + (16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr5 + (4 + 16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.load(in_ptr5 + (8 + 16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tmp37 + tmp38
tmp40 = tl.load(in_ptr5 + (12 + 16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 + tmp40
tmp42 = tmp34 / tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp31, tmp42, tmp43)
tmp45 = tl.where(tmp19, tmp30, tmp44)
tmp46 = tl.where(tmp4, tmp15, tmp45)
tl.store(out_ptr0 + x2, tmp46, xmask)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_19, (4, 12), (12, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (1, 4), (4, 1))
assert_size_stride(primals_22, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_2, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf2, primals_5,
primals_6, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4)
del primals_7
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf5,
primals_8, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf6, primals_11,
primals_12, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_15, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf8)
del primals_13
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0)
del buf8
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf9,
primals_14, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf10, primals_17,
primals_18, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_2[grid(48)](buf3, primals_6, buf7, primals_12,
buf11, primals_18, buf12, 48, XBLOCK=64, num_warps=1, num_stages=1)
del buf11
del buf3
buf13 = buf7
del buf7
extern_kernels.mm(buf12, reinterpret_tensor(primals_19, (12, 4), (1,
12), 0), out=buf13)
buf14 = buf13
del buf13
triton_poi_fused_relu_3[grid(16)](buf14, primals_20, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_20
buf15 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(primals_21, (4, 1), (1,
4), 0), out=buf15)
buf16 = buf15
del buf15
triton_poi_fused_sigmoid_4[grid(4)](buf16, primals_22, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_22
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf10,
primals_17, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del primals_17
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf6,
primals_11, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf6
del primals_11
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf2,
primals_5, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del primals_5
return (buf16, primals_6, primals_12, primals_18, reinterpret_tensor(
primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_15, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (16, 4),
(4, 1), 0), buf12, buf14, buf16, primals_21, primals_19, buf17,
primals_16, buf18, buf19, primals_10, buf20, buf21, primals_4, buf22)
class SetConvNew(nn.Module):
def __init__(self, sample_feats, predicate_feats, join_feats, hid_units):
super(SetConvNew, self).__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 3, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
primals_1 = self.sample_mlp1.weight
primals_2 = self.sample_mlp1.bias
primals_4 = self.sample_mlp2.weight
primals_5 = self.sample_mlp2.bias
primals_7 = self.predicate_mlp1.weight
primals_8 = self.predicate_mlp1.bias
primals_10 = self.predicate_mlp2.weight
primals_11 = self.predicate_mlp2.bias
primals_13 = self.join_mlp1.weight
primals_14 = self.join_mlp1.bias
primals_16 = self.join_mlp2.weight
primals_17 = self.join_mlp2.bias
primals_19 = self.out_mlp1.weight
primals_20 = self.out_mlp1.bias
primals_21 = self.out_mlp2.weight
primals_22 = self.out_mlp2.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_12 = input_3
primals_15 = input_4
primals_18 = input_5
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22])
return output[0]
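# Minimal usage sketch (not part of the original dump): six (4, 4, 4) CUDA
# tensors, matching get_inputs()/get_init_inputs() above.
if __name__ == '__main__':
    model = SetConvNew(sample_feats=4, predicate_feats=4, join_feats=4,
        hid_units=4).cuda()
    args = [torch.rand([4, 4, 4], device='cuda') for _ in range(6)]
    print(model(*args).shape)  # expected: torch.Size([4, 1])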
|
amogkam/learnedcardinalities
|
SetConv
| false
| 14,845
|
[
"MIT"
] | 64
|
295eabcf9ede38e7e9d1a6a8bcd00f349b628bf9
|
https://github.com/amogkam/learnedcardinalities/tree/295eabcf9ede38e7e9d1a6a8bcd00f349b628bf9
|
MAE
|
import torch
import torch.nn as nn
class MAE(nn.Module):
def __init__(self):
super(MAE, self).__init__()
def forward(self, outputs, target, *args):
val_pixels = (target > 0).float() * (outputs > 0).float()
err = torch.abs(target * val_pixels - outputs * val_pixels)
loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1,
keepdim=True)
return torch.mean(loss / cnt) * 1000
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
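    # Per-sample reduction: masked |outputs - target| sum (out_ptr0) and the
    # count of pixels where both outputs and target are > 0 (out_ptr1).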
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp4 > tmp1
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tmp0 * tmp7
tmp9 = tmp4 * tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + x0, tmp15, xmask)
tl.store(out_ptr1 + x0, tmp19, xmask)
@triton.jit
def triton_per_fused_div_mean_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
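    # Final reduction: mean of the per-sample (error sum / pixel count)
    # ratios, scaled by 1000.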
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = 4.0
tmp7 = tmp5 / tmp6
tmp8 = 1000.0
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_div_mean_mul_1[grid(1)](buf3, buf0, buf1, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class MAENew(nn.Module):
def __init__(self):
super(MAENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
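# Minimal usage sketch (not part of the original dump): the loss takes two
# CUDA tensors of matching shape and returns a scalar, as in get_inputs().
if __name__ == '__main__':
    loss = MAENew()
    out = loss(torch.rand([4, 4, 4, 4], device='cuda'),
        torch.rand([4, 4, 4, 4], device='cuda'))
    print(out.shape)  # expected: torch.Size([])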
|
anglixjtu/MSG_CHN_WACV20
|
MAE
| false
| 14,846
|
[
"Apache-2.0"
] | 61
|
6910894cf3caed2ffde27586f96b132b0c1d1a98
|
https://github.com/anglixjtu/MSG_CHN_WACV20/tree/6910894cf3caed2ffde27586f96b132b0c1d1a98
|
TransformerDecoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def _get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class DotProductAttention(nn.Module):
def __init__(self, dropout=0.0):
super(DotProductAttention, self).__init__()
self.dropout = dropout
def forward(self, q, k, v, attn_mask=None):
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_output_weights += attn_mask
attn_output_weights = F.softmax(attn_output_weights, dim=-1)
attn_output_weights = F.dropout(attn_output_weights, p=self.dropout,
training=self.training)
attn_output = torch.bmm(attn_output_weights, v)
return attn_output
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim=
None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim ==
embed_dim)
if self._qkv_same_embed_dim:
self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim,
embed_dim))
else:
            raise RuntimeError(
                'q, k, v with different dimensions are not supported')
if bias:
self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim)
if self._qkv_same_embed_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
self.dotproductattention = DotProductAttention(dropout)
def forward(self, q, k, v, attn_mask=None, key_padding_mask=None):
tsz, bsz, embed_dim = q.shape[0], q.shape[1], q.shape[2]
head_dim = embed_dim // self.num_heads
assert head_dim * self.num_heads == embed_dim, 'embed_dim must be divisible by num_heads'
scaling = float(head_dim) ** -0.5
_b = self.in_proj_bias
_start = None
_end = embed_dim
_w = self.in_proj_weight[:_end, :]
if _b is not None:
_b = _b[:_end]
q = F.linear(q, _w, _b)
_b = self.in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = self.in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(k, _w, _b)
_b = self.in_proj_bias
_start = embed_dim * 2
_end = None
_w = self.in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(v, _w, _b)
q = q * scaling
q = q.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0).repeat(bsz, 1, 1)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
attn_mask = attn_mask.reshape(-1, *attn_mask.shape[2:])
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, tsz, 1)
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, self
.num_heads, 1, 1)
key_padding_mask = key_padding_mask.reshape(-1, *
key_padding_mask.shape[2:])
if attn_mask is not None and key_padding_mask is not None:
mask = attn_mask + key_padding_mask
elif attn_mask is not None:
mask = attn_mask
elif key_padding_mask is not None:
mask = key_padding_mask
else:
mask = None
attn_output = self.dotproductattention(q, k, v, mask)
attn_output = attn_output.transpose(0, 1).contiguous().view(tsz,
bsz, self.embed_dim)
return self.out_proj(attn_output), None
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=
dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.tgt_cache = None
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def stream_inference(self, tgt, memory, pos, tgt_mask=None, memory_mask
=None, tgt_key_padding_mask=None, memory_key_padding_mask=None):
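        # Note: this path expects the cross-attention module to expose a
        # `stream_inference` method; the MultiheadAttention defined in this
        # file does not implement one, so the call below would raise an
        # AttributeError if invoked as-is.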
if self.tgt_cache is None:
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
self.tgt_cache = tgt
else:
tgt = self.tgt_cache
tgt2 = self.multihead_attn.stream_inference(tgt, memory, memory,
pos, attn_mask=memory_mask, key_padding_mask=
memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None):
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=
memory_mask, key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
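# Illustrative usage sketch (not part of the original source): exercises the
# eager-mode layer on the sample inputs above. The output keeps the
# (tsz, bsz, d_model) shape of `tgt`.
if __name__ == '__main__':
    layer = TransformerDecoderLayer(d_model=4, nhead=4)
    tgt, memory = get_inputs()
    print(layer(tgt, memory).shape)  # torch.Size([4, 4, 4])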
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
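    # Adds the q-projection bias in place and applies the attention scale
    # head_dim ** -0.5 (head_dim is 1 here, hence the literal 1.0 factor).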
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
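    # Softmax pass 1 of 2: subtract the row max over the last (size-4) axis
    # and exponentiate, for numerical stability.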
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused__softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
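    # Fused bias add + ReLU for the 2048-wide feed-forward activation; also
    # stores the (activation <= 0) mask that the backward pass reuses.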
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (12,), (1,))
assert_size_stride(primals_9, (12, 4), (4, 1))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (2048, 4), (4, 1))
assert_size_stride(primals_16, (2048,), (1,))
assert_size_stride(primals_17, (4, 2048), (2048, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 4),
reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 8),
reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1,
16, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(64)](primals_1, buf9,
buf10, buf11, primals_6, primals_7, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf13)
buf14 = reinterpret_tensor(buf5, (64, 4), (4, 1), 0)
del buf5
extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 4),
reinterpret_tensor(primals_10, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf14)
buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 8),
reinterpret_tensor(primals_10, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf15)
buf16 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0)
del buf13
triton_poi_fused_mul_0[grid(64)](buf16, primals_8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_8
buf17 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf14, (16, 1, 16), (1, 1, 16), 0), out=
buf17)
buf20 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_6[grid(64)](buf17, buf20, 64, 16, XBLOCK=
32, num_warps=4, num_stages=1)
del buf17
buf21 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf20, reinterpret_tensor(buf15, (16, 16, 1), (1,
16, 1), 0), out=buf21)
buf22 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 16)](buf21, buf22, 4, 16, XBLOCK=
16, YBLOCK=4, num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf21, (16, 4), (4, 1), 0)
del buf21
extern_kernels.mm(reinterpret_tensor(buf22, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf23)
buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0)
del buf23
triton_poi_fused_add_7[grid(64)](buf24, buf12, primals_12, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf25 = buf11
del buf11
buf26 = buf10
del buf10
triton_poi_fused_native_layer_norm_8[grid(16)](buf24, buf25, buf26,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf24, buf25, buf26,
primals_13, primals_14, buf27, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_14
buf28 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf27, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 2048), (1, 4), 0), out=buf28)
buf29 = reinterpret_tensor(buf28, (4, 4, 2048), (8192, 2048, 1), 0)
del buf28
buf35 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_10[grid(32768)](buf29,
primals_16, buf35, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_16
buf30 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf29, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_17, (2048, 4), (1, 2048), 0),
out=buf30)
buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0)
del buf30
triton_poi_fused_add_7[grid(64)](buf31, buf27, primals_18, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_18
buf32 = buf26
del buf26
buf33 = buf25
del buf25
triton_poi_fused_native_layer_norm_8[grid(16)](buf31, buf32, buf33,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf31, buf32, buf33,
primals_19, primals_20, buf34, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf32
del buf33
del primals_20
return (buf34, primals_1, primals_6, primals_13, primals_19, buf6,
reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9,
reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_10, (64, 4), (4, 1), 0), buf20, reinterpret_tensor(buf22, (
16, 4), (4, 1), 0), buf24, reinterpret_tensor(buf27, (16, 4), (4, 1
), 0), reinterpret_tensor(buf29, (16, 2048), (2048, 1), 0), buf31,
primals_17, buf35, primals_15, primals_11, reinterpret_tensor(buf15,
(16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf16, (16, 1, 4),
(1, 1, 16), 0), reinterpret_tensor(buf14, (16, 16, 1), (1, 16, 1),
0), reinterpret_tensor(primals_9, (4, 4), (4, 1), 0), primals_4,
reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0),
reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0),
reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0))
def _get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class DotProductAttention(nn.Module):
def __init__(self, dropout=0.0):
super(DotProductAttention, self).__init__()
self.dropout = dropout
def forward(self, q, k, v, attn_mask=None):
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_output_weights += attn_mask
attn_output_weights = F.softmax(attn_output_weights, dim=-1)
attn_output_weights = F.dropout(attn_output_weights, p=self.dropout,
training=self.training)
attn_output = torch.bmm(attn_output_weights, v)
return attn_output
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim=
None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim ==
embed_dim)
if self._qkv_same_embed_dim:
self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim,
embed_dim))
else:
raise RuntimeError(
                'q, k, and v with different embedding dimensions are not supported')
if bias:
self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim)
if self._qkv_same_embed_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
self.dotproductattention = DotProductAttention(dropout)
def forward(self, q, k, v, attn_mask=None, key_padding_mask=None):
tsz, bsz, embed_dim = q.shape[0], q.shape[1], q.shape[2]
head_dim = embed_dim // self.num_heads
assert head_dim * self.num_heads == embed_dim, 'embed_dim must be divisible by num_heads'
scaling = float(head_dim) ** -0.5
_b = self.in_proj_bias
_start = None
_end = embed_dim
_w = self.in_proj_weight[:_end, :]
if _b is not None:
_b = _b[:_end]
q = F.linear(q, _w, _b)
_b = self.in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = self.in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(k, _w, _b)
_b = self.in_proj_bias
_start = embed_dim * 2
_end = None
_w = self.in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(v, _w, _b)
q = q * scaling
q = q.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(
0, 1)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0).repeat(bsz, 1, 1)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
attn_mask = attn_mask.reshape(-1, *attn_mask.shape[2:])
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, tsz, 1)
key_padding_mask = key_padding_mask.unsqueeze(1).repeat(1, self
.num_heads, 1, 1)
key_padding_mask = key_padding_mask.reshape(-1, *
key_padding_mask.shape[2:])
if attn_mask is not None and key_padding_mask is not None:
mask = attn_mask + key_padding_mask
elif attn_mask is not None:
mask = attn_mask
elif key_padding_mask is not None:
mask = key_padding_mask
else:
mask = None
attn_output = self.dotproductattention(q, k, v, mask)
attn_output = attn_output.transpose(0, 1).contiguous().view(tsz,
bsz, self.embed_dim)
return self.out_proj(attn_output), None
class TransformerDecoderLayerNew(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerDecoderLayerNew, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=
dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.tgt_cache = None
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayerNew, self).__setstate__(state)
def stream_inference(self, tgt, memory, pos, tgt_mask=None, memory_mask
=None, tgt_key_padding_mask=None, memory_key_padding_mask=None):
if self.tgt_cache is None:
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
self.tgt_cache = tgt
else:
tgt = self.tgt_cache
tgt2 = self.multihead_attn.stream_inference(tgt, memory, memory,
pos, attn_mask=memory_mask, key_padding_mask=
memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(self, input_0, input_1):
primals_3 = self.self_attn.in_proj_weight
primals_2 = self.self_attn.in_proj_bias
primals_4 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_9 = self.multihead_attn.in_proj_weight
primals_8 = self.multihead_attn.in_proj_bias
primals_11 = self.multihead_attn.out_proj.weight
primals_6 = self.multihead_attn.out_proj.bias
primals_15 = self.linear1.weight
primals_16 = self.linear1.bias
primals_17 = self.linear2.weight
primals_7 = self.linear2.bias
primals_12 = self.norm1.weight
primals_13 = self.norm1.bias
primals_14 = self.norm2.weight
primals_18 = self.norm2.bias
primals_19 = self.norm3.weight
primals_20 = self.norm3.bias
primals_1 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0]
|
amazon-research/long-short-term-transformer
|
TransformerDecoderLayer
| false
| 14,847
|
[
"Apache-2.0"
] | 52
|
a425be4b52ab68fddd85c91d26571e4cdfe8379a
|
https://github.com/amazon-research/long-short-term-transformer/tree/a425be4b52ab68fddd85c91d26571e4cdfe8379a
|
ConvNet
|
import torch
import torch.nn as nn
import torch.nn.functional as tF
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(1, 5, 3, 1)
self.conv2 = nn.Conv2d(5, 6, 4, 1, bias=False)
self.conv3 = nn.Conv2d(6, 7, 3, 1)
self.fc1 = nn.Linear(1 * 1 * 7, 10)
def forward(self, x):
x = tF.relu(self.conv1(x))
x = tF.max_pool2d(x, 2, 2)
x = tF.relu(self.conv2(x))
x = tF.max_pool2d(x, 2, 2)
x = tF.relu(self.conv3(x))
x = tF.max_pool2d(x, 2, 2)
x = x.view(-1, 1 * 1 * 7)
x = self.fc1(x)
return tF.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
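# Shape walkthrough sketch (illustrative, not part of the original source):
# three conv+pool stages turn the 64x64 sample input into a (4, 7, 6, 6) map,
# and view(-1, 1 * 1 * 7) reshapes those 1008 activations into 144 rows of 7,
# so fc1 emits 144 logit rows rather than 4 (the layer sizes suggest the net
# was written for 28x28 inputs, where the final map is 1x1).
if __name__ == '__main__':
    net = ConvNet()
    print(net(*get_inputs()).shape)  # torch.Size([144, 10])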
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 76880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 5
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
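    # 2x2 stride-2 max pool over the 62x62 ReLU map; writes both the pooled
    # value and an int8 argmax index (0-3) for the backward pass.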
xnumel = 19220
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = xindex // 31 % 31
x4 = xindex // 961
x3 = xindex // 4805
x5 = xindex % 4805
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 4832 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 4864 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 144 % 7
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 24 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 24 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + 2 * x0 + 24 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + 2 * x0 + 24 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
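    # Row-wise log-softmax over the 10 logits, computed in the numerically
    # stable form x - max(x) - log(sum(exp(x - max(x)))).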
xnumel = 144
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (5, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (6, 5, 4, 4), (80, 16, 4, 1))
assert_size_stride(primals_5, (7, 6, 3, 3), (54, 9, 3, 1))
assert_size_stride(primals_6, (7,), (1,))
assert_size_stride(primals_7, (10, 7), (7, 1))
assert_size_stride(primals_8, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 5, 62, 62), (19220, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 5, 62, 62), (19360, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(76880)](buf0, primals_2,
buf1, 76880, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 5, 31, 31), (4832, 961, 31, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 5, 31, 31), (4864, 961, 31, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(19220)](buf1, buf2,
buf3, 19220, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 6, 28, 28), (4704, 784, 28, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(18816)](buf5, 18816, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf7 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(4704)](buf5, buf6,
buf7, 4704, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 7, 12, 12), (1008, 144, 12, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(4032)](buf9, primals_6,
4032, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf10 = empty_strided_cuda((4, 7, 6, 6), (252, 36, 6, 1), torch.int8)
buf11 = empty_strided_cuda((4, 7, 6, 6), (252, 36, 6, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_5[grid(1008)](buf9, buf10,
buf11, 1008, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((144, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (144, 7),
(7, 1), 0), reinterpret_tensor(primals_7, (7, 10), (1, 7), 0),
alpha=1, beta=1, out=buf12)
del primals_8
buf15 = empty_strided_cuda((144, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(144)](buf12, buf15, 144, 10,
XBLOCK=128, num_warps=8, num_stages=1)
del buf12
return (buf15, primals_1, primals_3, primals_4, primals_5, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (144,
7), (7, 1), 0), buf15, primals_7)
class ConvNetNew(nn.Module):
def __init__(self):
super(ConvNetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 5, 3, 1)
self.conv2 = nn.Conv2d(5, 6, 4, 1, bias=False)
self.conv3 = nn.Conv2d(6, 7, 3, 1)
self.fc1 = nn.Linear(1 * 1 * 7, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv3.weight
primals_6 = self.conv3.bias
primals_7 = self.fc1.weight
primals_8 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
amyami187/nngeometry
|
ConvNet
| false
| 14,848
|
[
"MIT"
] | 103
|
cb516da3f7a019e148f48ff3ef3bed0cdae0d184
|
https://github.com/amyami187/nngeometry/tree/cb516da3f7a019e148f48ff3ef3bed0cdae0d184
|
MSELoss
|
import torch
import torch.nn as nn
class MSELoss(nn.Module):
def __init__(self):
super(MSELoss, self).__init__()
def forward(self, outputs, target, *args):
val_pixels = torch.ne(target, 0).float()
loss = target * val_pixels - outputs * val_pixels
return torch.sum(loss ** 2) / torch.sum(val_pixels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
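# Worked example (illustrative, not part of the original source): the loss is
# a mean squared error restricted to pixels where the target is non-zero.
# With target = [2, 0] and outputs = [1, 5], only the first element is valid,
# so the loss is (2 - 1) ** 2 / 1 = 1.0; the second element is masked out.
if __name__ == '__main__':
    crit = MSELoss()
    print(crit(torch.tensor([1.0, 5.0]), torch.tensor([2.0, 0.0])))  # tensor(1.)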
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_mul_ne_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
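    # One-pass masked-MSE reduction: builds the (target != 0) mask, sums the
    # squared masked differences, and divides by the number of valid pixels.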
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp4 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.broadcast_to(tmp3, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp11 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_div_mul_ne_pow_sub_sum_0[grid(1)](buf2,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class MSELossNew(nn.Module):
def __init__(self):
super(MSELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anglixjtu/MSG_CHN_WACV20
|
MSELoss
| false
| 14,849
|
[
"Apache-2.0"
] | 61
|
6910894cf3caed2ffde27586f96b132b0c1d1a98
|
https://github.com/anglixjtu/MSG_CHN_WACV20/tree/6910894cf3caed2ffde27586f96b132b0c1d1a98
|
MeanAggregator
|
import torch
import torch.nn as nn
class MeanAggregator(nn.Module):
def __init__(self):
super(MeanAggregator, self).__init__()
def forward(self, x: 'torch.Tensor'):
return x.mean(dim=1)
def __call__(self, *args, **kwargs):
return super(MeanAggregator, self).__call__(*args, **kwargs)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
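# Minimal check (illustrative, not part of the original source): dim=1 is
# averaged away, so a (2, 3, 5) input yields a (2, 5) per-row mean.
if __name__ == '__main__':
    agg = MeanAggregator()
    print(agg(torch.ones(2, 3, 5)).shape)  # torch.Size([2, 5])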
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
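    # Averages the four dim=1 slices of the (4, 4, 4, 4) input in one
    # elementwise pass: (x[:, 0] + x[:, 1] + x[:, 2] + x[:, 3]) / 4.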
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MeanAggregatorNew(nn.Module):
def __init__(self):
super(MeanAggregatorNew, self).__init__()
def __call__(self, *args, **kwargs):
return super(MeanAggregatorNew, self).__call__(*args, **kwargs)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
angpo/VKD
|
MeanAggregator
| false
| 14,850
|
[
"MIT"
] | 68
|
2a136e00dad4c73612d6efe087675604ac2416eb
|
https://github.com/angpo/VKD/tree/2a136e00dad4c73612d6efe087675604ac2416eb
|
DepthwiseSeparableConv
|
import torch
import torch.nn.functional as F
import torch.cuda
import torch.nn as nn
class DepthwiseSeparableConv(nn.Module):
def __init__(self, in_ch, out_ch, k, bias=True):
super().__init__()
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
out_ch, kernel_size=1, padding=0, bias=bias)
def forward(self, x):
return F.relu(self.pointwise_conv(self.depthwise_conv(x)))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4, 'k': 4}]
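# Parameter-count sketch (illustrative, not part of the original source): the
# depthwise/pointwise factorisation costs in_ch*k + in_ch*out_ch + out_ch
# weights (16 + 16 + 4 = 36 here) versus in_ch*out_ch*k + out_ch = 68 for a
# dense Conv1d(4, 4, kernel_size=4) with bias.
if __name__ == '__main__':
    m = DepthwiseSeparableConv(in_ch=4, out_ch=4, k=4)
    print(sum(p.numel() for p in m.parameters()))  # 36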
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.cuda
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,),
dilation=(1,), transposed=False, output_padding=(0,), groups=4,
bias=None)
assert_size_stride(buf0, (1, 4, 5), (20, 5, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 5), (20, 5, 1))
buf2 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 5), (5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(20)](buf2,
primals_4, buf3, 20, XBLOCK=32, num_warps=1, num_stages=1)
del primals_4
return buf2, primals_1, primals_3, reinterpret_tensor(primals_2, (1, 4,
4), (16, 4, 1), 0), buf0, buf3
class DepthwiseSeparableConvNew(nn.Module):
def __init__(self, in_ch, out_ch, k, bias=True):
super().__init__()
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
out_ch, kernel_size=1, padding=0, bias=bias)
def forward(self, input_0):
primals_1 = self.depthwise_conv.weight
primals_3 = self.pointwise_conv.weight
primals_4 = self.pointwise_conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
andy840314/QANet-pytorch-
|
DepthwiseSeparableConv
| false
| 14,851
|
[
"MIT"
] | 92
|
3c11e2d7139e040eee90dd24b673eb1039957cae
|
https://github.com/andy840314/QANet-pytorch-/tree/3c11e2d7139e040eee90dd24b673eb1039957cae
|
BuildBlock
|
import torch
import torch.nn.functional as F
from torch import nn
class BuildBlock(nn.Module):
def __init__(self, planes=256):
super(BuildBlock, self).__init__()
self.planes = planes
self.toplayer1 = nn.Conv2d(2048, planes, kernel_size=1, stride=1,
padding=0)
self.toplayer2 = nn.Conv2d(256, planes, kernel_size=3, stride=1,
padding=1)
self.toplayer3 = nn.Conv2d(256, planes, kernel_size=3, stride=1,
padding=1)
self.toplayer4 = nn.Conv2d(256, planes, kernel_size=3, stride=1,
padding=1)
self.latlayer1 = nn.Conv2d(1024, planes, kernel_size=1, stride=1,
padding=0)
self.latlayer2 = nn.Conv2d(512, planes, kernel_size=1, stride=1,
padding=0)
self.latlayer3 = nn.Conv2d(256, planes, kernel_size=1, stride=1,
padding=0)
def _upsample_add(self, x, y):
_, _, H, W = y.size()
        # F.upsample is deprecated in favour of the equivalent F.interpolate.
        return F.interpolate(x, size=(H, W), mode='bilinear',
            align_corners=True) + y
def forward(self, c2, c3, c4, c5):
p5 = self.toplayer1(c5)
p4 = self._upsample_add(p5, self.latlayer1(c4))
p4 = self.toplayer2(p4)
p3 = self._upsample_add(p4, self.latlayer2(c3))
p3 = self.toplayer3(p3)
p2 = self._upsample_add(p3, self.latlayer3(c2))
p2 = self.toplayer4(p2)
return p2, p3, p4, p5
def get_inputs():
return [torch.rand([4, 256, 64, 64]), torch.rand([4, 512, 64, 64]),
torch.rand([4, 1024, 64, 64]), torch.rand([4, 2048, 64, 64])]
def get_init_inputs():
return [[], {}]
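# Top-down FPN fusion sketch (illustrative, not part of the original source;
# small 8x8 maps keep memory modest): every pyramid level comes out with
# `planes` (256) channels at the spatial size of its lateral input.
if __name__ == '__main__':
    fpn = BuildBlock()
    c2, c3, c4, c5 = (torch.rand(1, c, 8, 8) for c in (256, 512, 1024, 2048))
    p2, p3, p4, p5 = fpn(c2, c3, c4, c5)
    print(p2.shape)  # torch.Size([1, 256, 8, 8])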
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 2048
y1 = yindex // 2048
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 2048 * x2 + 8388608 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1048576 * y1), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, None)
@triton.jit
def triton_poi_fused__to_copy_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
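    # Computes floor(dst * scale) source indices for bilinear resampling; the
    # scale is 1.0 for this 64 -> 64 case, so the mapping is the identity.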
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 63, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_8(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = triton_helpers.minimum(tmp9, tmp2)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_9(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
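    # Fused bilinear upsample-and-add: gathers the four neighbouring source
    # texels, lerps horizontally then vertically, and adds the biased lateral
    # convolution output in place.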
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex // 64
x1 = xindex % 64
y0 = yindex
x5 = xindex
y3 = yindex % 256
y4 = yindex // 256
tmp0 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr6 + x2, None, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (y3 + 256 * x5 + 1048576 * y4), None,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr7 + y3, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK, YBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * y0), None,
eviction_policy='evict_last')
tmp11 = tmp10 + tmp1
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr2 + (tmp13 + 64 * tmp4 + 4096 * y0), None,
eviction_policy='evict_last')
tmp15 = tmp14 - tmp9
tmp17 = tmp15 * tmp16
tmp18 = tmp9 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = tmp19 < 0
tmp22 = tl.where(tmp21, tmp20, tmp19)
tmp23 = tl.load(in_ptr2 + (tmp8 + 64 * tmp22 + 4096 * y0), None,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (tmp13 + 64 * tmp22 + 4096 * y0), None,
eviction_policy='evict_last')
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp16
tmp27 = tmp23 + tmp26
tmp28 = tmp27 - tmp18
tmp30 = tmp28 * tmp29
tmp31 = tmp18 + tmp30
tmp34 = tmp32 + tmp33
tmp35 = tmp31 + tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + (y3 + 256 * x5 + 1048576 * y4), tmp35, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (256, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 2048, 64, 64), (8388608, 4096, 64, 1))
assert_size_stride(primals_4, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_7, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_17, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_18, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 1, 131072,
2048), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(8192, 4096)](primals_3, buf0, 8192, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536,
1024), torch.float32)
triton_poi_fused_1[grid(4096, 4096)](primals_6, buf1, 4096, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_7, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_7
buf3 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
triton_poi_fused_3[grid(2048, 4096)](primals_11, buf3, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_11
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_12, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf5 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
triton_poi_fused_4[grid(1024, 4096)](primals_16, buf5, 1024, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_16
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_17, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_17
buf7 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf8 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_5[grid(1024, 4096)](buf7, primals_2,
buf8, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf9 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf10 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_8[grid(64)](buf14,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_8[grid(64)](buf16,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf17 = buf9
del buf9
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9[grid(1024,
4096)](buf17, buf10, buf12, buf8, buf13, buf14, buf11, buf16,
primals_5, 1024, 4096, XBLOCK=256, YBLOCK=16, num_warps=8,
num_stages=1)
del primals_5
buf18 = extern_kernels.convolution(buf17, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf19 = reinterpret_tensor(buf7, (4, 256, 64, 64), (1048576, 4096,
64, 1), 0)
del buf7
triton_poi_fused_convolution_5[grid(1024, 4096)](buf18, primals_8,
buf19, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf20 = extern_kernels.convolution(buf3, primals_9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf22 = buf20
del buf20
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9[grid(1024,
4096)](buf22, buf10, buf12, buf19, buf13, buf14, buf11, buf16,
primals_10, 1024, 4096, XBLOCK=256, YBLOCK=16, num_warps=8,
num_stages=1)
del primals_10
buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf24 = reinterpret_tensor(buf18, (4, 256, 64, 64), (1048576, 4096,
64, 1), 0)
del buf18
triton_poi_fused_convolution_5[grid(1024, 4096)](buf23, primals_13,
buf24, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_13
buf25 = extern_kernels.convolution(buf5, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf27 = buf25
del buf25
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9[grid(1024,
4096)](buf27, buf10, buf12, buf24, buf13, buf14, buf11, buf16,
primals_15, 1024, 4096, XBLOCK=256, YBLOCK=16, num_warps=8,
num_stages=1)
del primals_15
buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf29 = reinterpret_tensor(buf23, (4, 256, 64, 64), (1048576, 4096,
64, 1), 0)
del buf23
triton_poi_fused_convolution_5[grid(1024, 4096)](buf28, primals_18,
buf29, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf28
del primals_18
return (buf29, buf24, buf19, buf8, primals_1, buf0, primals_4, buf1,
buf2, primals_9, buf3, buf4, primals_14, buf5, buf6, buf10, buf11,
buf12, buf13, buf14, buf16, buf17, buf22, buf27)
class BuildBlockNew(nn.Module):
def __init__(self, planes=256):
super(BuildBlockNew, self).__init__()
self.planes = planes
self.toplayer1 = nn.Conv2d(2048, planes, kernel_size=1, stride=1,
padding=0)
self.toplayer2 = nn.Conv2d(256, planes, kernel_size=3, stride=1,
padding=1)
self.toplayer3 = nn.Conv2d(256, planes, kernel_size=3, stride=1,
padding=1)
self.toplayer4 = nn.Conv2d(256, planes, kernel_size=3, stride=1,
padding=1)
self.latlayer1 = nn.Conv2d(1024, planes, kernel_size=1, stride=1,
padding=0)
self.latlayer2 = nn.Conv2d(512, planes, kernel_size=1, stride=1,
padding=0)
self.latlayer3 = nn.Conv2d(256, planes, kernel_size=1, stride=1,
padding=0)
def _upsample_add(self, x, y):
_, _, H, W = y.size()
        return F.upsample(x, size=(H, W), mode='bilinear',
            align_corners=True) + y
    def forward(self, input_0, input_1, input_2, input_3):
        primals_1 = self.toplayer1.weight
        primals_2 = self.toplayer1.bias
        primals_7 = self.toplayer2.weight
        primals_8 = self.toplayer2.bias
        primals_12 = self.toplayer3.weight
        primals_13 = self.toplayer3.bias
        primals_17 = self.toplayer4.weight
        primals_18 = self.toplayer4.bias
        primals_4 = self.latlayer1.weight
        primals_5 = self.latlayer1.bias
        primals_9 = self.latlayer2.weight
        primals_10 = self.latlayer2.bias
        primals_14 = self.latlayer3.weight
        primals_15 = self.latlayer3.bias
        primals_16 = input_0
        primals_11 = input_1
        primals_6 = input_2
        primals_3 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0], output[1], output[2], output[3]
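def _demo_buildblocknew():
    # Editorial illustration (not part of the original file): call() asserts
    # fixed input shapes, so the wrapper expects exactly these 64x64 feature
    # maps (finest to coarsest) and a CUDA device.
    if torch.cuda.is_available():
        m = BuildBlockNew().cuda()
        feats = [torch.rand(4, c, 64, 64, device='cuda')
                 for c in (256, 512, 1024, 2048)]
        outs = m(*feats)
        assert all(o.shape == (4, 256, 64, 64) for o in outs)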
repo_name: YacobBY/ICDAR2019-ArT-Recognition-Alchemy
module_name: BuildBlock
synthetic: false
uuid: 14852
licenses: ["MIT"]
stars: 209
sha: 911c572c2aff4599a74b7974d46ef4cfb17078b9
repo_link: https://github.com/YacobBY/ICDAR2019-ArT-Recognition-Alchemy/tree/911c572c2aff4599a74b7974d46ef4cfb17078b9
entry_point: ResNetV2
original_triton_python_code:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from collections import OrderedDict
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
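def _demo_tf2th():
    # Editorial illustration (not part of the original repo): tf2th converts
    # a TF-style HWIO kernel into PyTorch's OIHW layout.
    import numpy as np
    w_hwio = np.zeros((7, 7, 3, 64), dtype=np.float32)  # (H, W, in, out)
    w_oihw = tf2th(w_hwio)
    assert tuple(w_oihw.shape) == (64, 3, 7, 7)  # (out, in, H, W)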
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
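def _demo_std_conv2d():
    # Editorial illustration (not part of the original repo): StdConv2d
    # standardizes each output filter of the weight to zero mean / unit
    # variance before the convolution, exactly as in forward() above.
    conv = StdConv2d(3, 8, kernel_size=3, padding=1)
    v, m = torch.var_mean(conv.weight, dim=[1, 2, 3], keepdim=True,
        unbiased=False)
    w = (conv.weight - m) / torch.sqrt(v + 1e-10)
    assert torch.allclose(w.mean(dim=[1, 2, 3]), torch.zeros(8), atol=1e-05)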
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride)
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[
f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[
f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[
f'{prefix}c/{convname}/kernel']))
            self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
            self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
            self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
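def _demo_preact_bottleneck():
    # Editorial illustration (not part of the original repo): with stride=2
    # or cin != cout, the residual comes from the 1x1 downsample projection
    # applied to the *pre-activated* input.
    block = PreActBottleneck(cin=64, cout=128, cmid=32, stride=2)
    y = block(torch.randn(2, 64, 16, 16))
    assert y.shape == (2, 128, 8, 8)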
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843,
zero_head=False):
super().__init__()
wf = width_factor
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, 64 * wf, kernel_size=7, stride=2,
                padding=3, bias=False)),
            ('pad', nn.ConstantPad2d(1, 0)),
            ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=64 * wf, cout=256 * wf, cmid=64 * wf))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=256 * wf, cout=256 * wf, cmid=64 * wf))
                 for i in range(2, block_units[0] + 1)]))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=128 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=512 * wf, cout=512 * wf, cmid=128 * wf))
                 for i in range(2, block_units[1] + 1)]))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=256 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=1024 * wf, cout=1024 * wf, cmid=256 * wf))
                 for i in range(2, block_units[2] + 1)]))),
            ('block4', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid=512 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=2048 * wf, cout=2048 * wf, cmid=512 * wf))
                 for i in range(2, block_units[3] + 1)]))),
        ]))
        self.zero_head = zero_head
        self.head = nn.Sequential(OrderedDict([
            ('gn', nn.GroupNorm(32, 2048 * wf)),
            ('relu', nn.ReLU(inplace=True)),
            ('avg', nn.AdaptiveAvgPool2d(output_size=1)),
            ('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1,
                bias=True)),
        ]))
def forward(self, x):
x = self.head(self.body(self.root(x)))
assert x.shape[-2:] == (1, 1)
return x[..., 0, 0]
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
self.root.conv.weight.copy_(tf2th(weights[
f'{prefix}root_block/standardized_conv2d/kernel']))
self.head.gn.weight.copy_(tf2th(weights[
f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(tf2th(weights[
f'{prefix}head/conv2d/kernel']))
self.head.conv.bias.copy_(tf2th(weights[
f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'block_units': [4, 4, 4, 4], 'width_factor': 4}]
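def _demo_resnetv2():
    # Editorial illustration (not part of the original repo): a deliberately
    # small configuration; the benchmark harness itself presumably builds the
    # model from get_init_inputs() (width_factor=4, which is far heavier).
    model = ResNetV2(block_units=[1, 1, 1, 1], width_factor=1, head_size=10)
    x = torch.rand(2, 3, 64, 64)
    logits = model(x)  # 64x64 input reaches 1x1 spatial before the head conv
    assert logits.shape == (2, 10)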
optimised_triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2048
y1 = yindex // 2048
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 2048 * x2 + 18432 * y1), tmp0, xmask)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask)
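# Editorial sketch (not emitted by Inductor): the *_add_div_sqrt_sub_var_mean_*
# reductions implement StdConv2d's weight standardization. For the root 7x7
# conv above, each of the 256 output filters is reduced over
# rnumel = 3 * 7 * 7 = 147 weight elements; an equivalent eager formulation:
def _reference_weight_standardize(w):
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - m) / torch.sqrt(v + 1e-10)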
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8704 % 34
x1 = xindex // 256 % 34
x3 = xindex // 295936
x4 = xindex % 8704
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-8448 + x4 + 8192 * x2 + 262144 * x3), tmp10,
other=0.0)
tl.store(out_ptr0 + x6, tmp11, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 16
x2 = xindex // 4096 % 16
x3 = xindex // 65536
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp5 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp7 = tl.load(in_ptr0 + (8960 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp9 = tl.load(in_ptr0 + (9216 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp11 = tl.load(in_ptr0 + (17408 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp13 = tl.load(in_ptr0 + (17664 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp15 = tl.load(in_ptr0 + (17920 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, None)
tl.store(out_ptr1 + x4, tmp41, None)
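# Editorial sketch (not emitted by Inductor): kernels 7 and 8 correspond to
# root.pad + root.pool, i.e. ConstantPad2d(1, 0) followed by a 3x3/stride-2
# max pool in channels-last layout; the second output stores argmax indices
# for the backward pass. An equivalent eager formulation:
def _reference_pad_pool(x):
    return F.max_pool2d(F.pad(x, (1, 1, 1, 1)), kernel_size=3, stride=2)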
@triton.jit
def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
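# Editorial sketch (not emitted by Inductor): the Welford reduction above
# computes per-(sample, group) statistics for GroupNorm(32, C), and this
# pointwise kernel applies rsqrt(var + 1e-05), the affine gamma/beta, and the
# fused ReLU. An equivalent eager formulation:
def _reference_group_norm_relu(x, weight, bias):
    return F.relu(F.group_norm(x, 32, weight, bias, eps=1e-05))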
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_12(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
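# Editorial note: the small *_add_* pointwise kernels fuse the residual
# `out + residual` from PreActBottleneck.forward, accumulating in place.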
@triton.jit
def triton_red_fused_native_group_norm_14(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 262144 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_15(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 262144
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_16(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_18(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_21(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = rindex // 16
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_26(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = rindex // 64
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_27(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_28(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
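# Editorial note: for larger filters (rnumel = 2048 weight elements per
# output filter here) Inductor emits a two-pass variant of the same weight
# standardization: the first loop accumulates Welford statistics, the second
# streams the weights again to write (w - mean) / sqrt(var + 1e-10).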
@triton.jit
def triton_poi_fused_add_29(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_31(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_32(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_33(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_native_group_norm_34(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = rindex // 32
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_35(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_36(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_37(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = rindex // 128
tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_poi_fused_add_41(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_42(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # unused Welford count
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask)
@triton.jit
def triton_per_fused_native_group_norm_44(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = rindex // 64
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
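# Kernel 44 emits three per-(batch, group) statistics: the mean
# (out_ptr0), the raw sum of squared deviations (out_ptr1), and the
# rsqrt of the variance (out_ptr2). The pointwise kernel below
# recomputes the rsqrt from the raw sum, so the stored copy is
# presumably retained for the backward pass.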
@triton.jit
def triton_poi_fused_native_group_norm_relu_45(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
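# Reference sketch (assumed eager equivalent) of each reduction/pointwise
# kernel pair in this section: group norm over 32 groups with eps=1e-05,
# followed by ReLU.
def _reference_group_norm_relu(x, weight, bias, num_groups=32, eps=1e-05):
    import torch.nn.functional as F
    return F.relu(F.group_norm(x, num_groups, weight, bias, eps))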
@triton.jit
def triton_per_fused_native_group_norm_46(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = rindex // 64
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 8192 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_47(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_48(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # unused Welford count
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)
@triton.jit
def triton_poi_fused_add_49(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_50(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = rindex // 256
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_51(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 8192
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 256), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 256), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_52(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # unused Welford count
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 8192 * x0), tmp12, rmask)
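# Kernels 42, 43, 48, and 52 repeat the same two-pass weight
# standardization, specialized to per-channel reduction lengths of
# 4096, 4096, 2048, and 8192 elements respectively.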
@triton.jit
def triton_poi_fused_add_53(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_54(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = rindex // 256
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
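# Unlike kernels 44/46/50, kernel 54 folds the rsqrt into the reduction
# and overwrites in_out_ptr0 with it, so the fused head kernel below can
# multiply by the ready-made inverse std instead of recomputing it.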
@triton.jit
def triton_poi_fused_mean_native_group_norm_relu_55(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8192
x1 = xindex // 8192
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32768 * x1), None)
tmp1 = tl.load(in_ptr1 + x2 // 256, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2 // 256, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (8192 + x0 + 32768 * x1), None)
tmp18 = tl.load(in_ptr0 + (16384 + x0 + 32768 * x1), None)
tmp25 = tl.load(in_ptr0 + (24576 + x0 + 32768 * x1), None)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = tmp11 - tmp1
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp5
tmp15 = tmp14 + tmp7
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp17 = tmp10 + tmp16
tmp19 = tmp18 - tmp1
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp5
tmp22 = tmp21 + tmp7
tmp23 = triton_helpers.maximum(tmp9, tmp22)
tmp24 = tmp17 + tmp23
tmp26 = tmp25 - tmp1
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp5
tmp29 = tmp28 + tmp7
tmp30 = triton_helpers.maximum(tmp9, tmp29)
tmp31 = tmp24 + tmp30
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.store(out_ptr0 + x2, tmp33, None)
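# Kernel 55 fuses the final group norm, ReLU, and the head's global
# average pool over the four spatial positions of the 2x2 feature map
# (sum of four normalized values divided by 4.0). A hedged eager sketch
# of that tail, with mean/rstd pre-broadcast as kernel 54 provides them:
def _reference_head_pool(x, mean, rstd, weight, bias):
    # x: (N, C, 2, 2); weight and bias are per-channel vectors.
    y = torch.relu((x - mean) * rstd * weight.view(1, -1, 1, 1)
                   + bias.view(1, -1, 1, 1))
    return y.mean(dim=(2, 3), keepdim=True)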
@triton.jit
def triton_poi_fused_convolution_56(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 87372
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 21843
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
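# Kernel 56 adds the classifier bias after the final 1x1 convolution:
# 4 batch x 21843 classes = 87372 logits, hence the masked launch. The
# 21843-way head suggests an ImageNet-21k-style classifier, though that
# is only an inference from the shapes.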
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154) = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_13, (1024,), (1,))
assert_size_stride(primals_14, (1024,), (1,))
assert_size_stride(primals_15, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_16, (256,), (1,))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256,), (1,))
assert_size_stride(primals_21, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_22, (1024,), (1,))
assert_size_stride(primals_23, (1024,), (1,))
assert_size_stride(primals_24, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_25, (256,), (1,))
assert_size_stride(primals_26, (256,), (1,))
assert_size_stride(primals_27, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (256,), (1,))
assert_size_stride(primals_29, (256,), (1,))
assert_size_stride(primals_30, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_31, (1024,), (1,))
assert_size_stride(primals_32, (1024,), (1,))
assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_34, (256,), (1,))
assert_size_stride(primals_35, (256,), (1,))
assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (256,), (1,))
assert_size_stride(primals_38, (256,), (1,))
assert_size_stride(primals_39, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_40, (1024,), (1,))
assert_size_stride(primals_41, (1024,), (1,))
assert_size_stride(primals_42, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_43, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_44, (512,), (1,))
assert_size_stride(primals_45, (512,), (1,))
assert_size_stride(primals_46, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_47, (512,), (1,))
assert_size_stride(primals_48, (512,), (1,))
assert_size_stride(primals_49, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_50, (2048,), (1,))
assert_size_stride(primals_51, (2048,), (1,))
assert_size_stride(primals_52, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_53, (512,), (1,))
assert_size_stride(primals_54, (512,), (1,))
assert_size_stride(primals_55, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_56, (512,), (1,))
assert_size_stride(primals_57, (512,), (1,))
assert_size_stride(primals_58, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_59, (2048,), (1,))
assert_size_stride(primals_60, (2048,), (1,))
assert_size_stride(primals_61, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_62, (512,), (1,))
assert_size_stride(primals_63, (512,), (1,))
assert_size_stride(primals_64, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_65, (512,), (1,))
assert_size_stride(primals_66, (512,), (1,))
assert_size_stride(primals_67, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_68, (2048,), (1,))
assert_size_stride(primals_69, (2048,), (1,))
assert_size_stride(primals_70, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_71, (512,), (1,))
assert_size_stride(primals_72, (512,), (1,))
assert_size_stride(primals_73, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_74, (512,), (1,))
assert_size_stride(primals_75, (512,), (1,))
assert_size_stride(primals_76, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_77, (2048,), (1,))
assert_size_stride(primals_78, (2048,), (1,))
assert_size_stride(primals_79, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_80, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_81, (1024,), (1,))
assert_size_stride(primals_82, (1024,), (1,))
assert_size_stride(primals_83, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_84, (1024,), (1,))
assert_size_stride(primals_85, (1024,), (1,))
assert_size_stride(primals_86, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_87, (4096,), (1,))
assert_size_stride(primals_88, (4096,), (1,))
assert_size_stride(primals_89, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_90, (1024,), (1,))
assert_size_stride(primals_91, (1024,), (1,))
assert_size_stride(primals_92, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_93, (1024,), (1,))
assert_size_stride(primals_94, (1024,), (1,))
assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_96, (4096,), (1,))
assert_size_stride(primals_97, (4096,), (1,))
assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_99, (1024,), (1,))
assert_size_stride(primals_100, (1024,), (1,))
assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_102, (1024,), (1,))
assert_size_stride(primals_103, (1024,), (1,))
assert_size_stride(primals_104, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_105, (4096,), (1,))
assert_size_stride(primals_106, (4096,), (1,))
assert_size_stride(primals_107, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_108, (1024,), (1,))
assert_size_stride(primals_109, (1024,), (1,))
assert_size_stride(primals_110, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_111, (1024,), (1,))
assert_size_stride(primals_112, (1024,), (1,))
assert_size_stride(primals_113, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_114, (4096,), (1,))
assert_size_stride(primals_115, (4096,), (1,))
assert_size_stride(primals_116, (8192, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_117, (2048, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_118, (2048,), (1,))
assert_size_stride(primals_119, (2048,), (1,))
assert_size_stride(primals_120, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_121, (2048,), (1,))
assert_size_stride(primals_122, (2048,), (1,))
assert_size_stride(primals_123, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_124, (8192,), (1,))
assert_size_stride(primals_125, (8192,), (1,))
assert_size_stride(primals_126, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_127, (2048,), (1,))
assert_size_stride(primals_128, (2048,), (1,))
assert_size_stride(primals_129, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_130, (2048,), (1,))
assert_size_stride(primals_131, (2048,), (1,))
assert_size_stride(primals_132, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_133, (8192,), (1,))
assert_size_stride(primals_134, (8192,), (1,))
assert_size_stride(primals_135, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_136, (2048,), (1,))
assert_size_stride(primals_137, (2048,), (1,))
assert_size_stride(primals_138, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_139, (2048,), (1,))
assert_size_stride(primals_140, (2048,), (1,))
assert_size_stride(primals_141, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_142, (8192,), (1,))
assert_size_stride(primals_143, (8192,), (1,))
assert_size_stride(primals_144, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_145, (2048,), (1,))
assert_size_stride(primals_146, (2048,), (1,))
assert_size_stride(primals_147, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_148, (2048,), (1,))
assert_size_stride(primals_149, (2048,), (1,))
assert_size_stride(primals_150, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_151, (8192,), (1,))
assert_size_stride(primals_152, (8192,), (1,))
assert_size_stride(primals_153, (21843, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_154, (21843,), (1,))
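    # The asserts above pin the exact shapes and strides this graph was
    # specialized on: standardized conv weights, per-channel group-norm
    # affines, and a (4, 3, 64, 64) input batch. The layout kernels
    # invoked first repack weights and activations into channels-last
    # strides for the convolutions.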
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_9, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_9
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_18, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_27, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_27
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_36, buf5, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_36
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_46, buf6, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_46
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_55, buf7, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_55
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_64, buf8, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_64
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_73, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_73
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_83, buf10, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_83
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_92, buf11, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_92
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_101, buf12, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_101
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_110, buf13, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_110
buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_120, buf14, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_120
buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_129, buf15, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_129
buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_138, buf16, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_138
buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_147, buf17, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_147
buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf19
buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
triton_per_fused_add_div_sqrt_sub_var_mean_6[grid(256)](buf21, buf0,
buf22, 256, 147, XBLOCK=1, num_warps=2, num_stages=1)
buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(1183744)](buf23, buf24,
1183744, XBLOCK=1024, num_warps=4, num_stages=1)
buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(262144)](buf24,
buf25, buf26, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf25, buf27, buf28,
buf30, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf25,
buf27, buf28, primals_3, primals_4, buf31, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_4
buf33 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf35 = reinterpret_tensor(buf33, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf33
buf36 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf35,
primals_5, buf36, 1024, 256, num_warps=2, num_stages=1)
buf37 = extern_kernels.convolution(buf31, buf36, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf39 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf41 = reinterpret_tensor(buf39, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf39
buf42 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_12[grid(256)](buf41,
primals_6, buf42, 256, 256, num_warps=2, num_stages=1)
buf43 = extern_kernels.convolution(buf31, buf42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf44 = buf28
del buf28
buf45 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf47 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf43, buf44, buf45,
buf47, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf48 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf43,
buf44, buf45, primals_7, primals_8, buf48, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_8
buf49 = extern_kernels.convolution(buf48, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf50 = buf45
del buf45
buf51 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf53 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf49, buf50, buf51,
buf53, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf54 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf49,
buf50, buf51, primals_10, primals_11, buf54, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_11
buf56 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf58 = reinterpret_tensor(buf56, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf56
buf59 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf58,
primals_12, buf59, 1024, 256, num_warps=2, num_stages=1)
buf60 = extern_kernels.convolution(buf54, buf59, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf61 = buf37
del buf37
triton_poi_fused_add_13[grid(1048576)](buf61, buf60, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf62 = buf51
del buf51
buf63 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf65 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_14[grid(128)](buf61, buf62,
buf63, buf65, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf66 = buf60
del buf60
triton_poi_fused_native_group_norm_relu_15[grid(1048576)](buf61,
buf62, buf63, primals_13, primals_14, buf66, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_14
buf68 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf70 = reinterpret_tensor(buf68, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf68
buf71 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_16[grid(256)](buf70,
primals_15, buf71, 256, 1024, num_warps=8, num_stages=1)
buf72 = extern_kernels.convolution(buf66, buf71, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf73 = buf63
del buf63
buf74 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf76 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf72, buf73, buf74,
buf76, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf77 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf72,
buf73, buf74, primals_16, primals_17, buf77, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_17
buf78 = extern_kernels.convolution(buf77, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf79 = buf74
del buf74
buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf82 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf78, buf79, buf80,
buf82, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf83 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf78,
buf79, buf80, primals_19, primals_20, buf83, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_20
buf85 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf87 = reinterpret_tensor(buf85, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf85
buf88 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf87,
primals_21, buf88, 1024, 256, num_warps=2, num_stages=1)
buf89 = extern_kernels.convolution(buf83, buf88, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf90 = buf89
del buf89
triton_poi_fused_add_17[grid(1048576)](buf90, buf61, 1048576,
XBLOCK=1024, num_warps=4, num_stages=1)
buf91 = buf80
del buf80
buf92 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf94 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_14[grid(128)](buf90, buf91,
buf92, buf94, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf95 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1,
16384, 1024), 0)
del buf23
triton_poi_fused_native_group_norm_relu_15[grid(1048576)](buf90,
buf91, buf92, primals_22, primals_23, buf95, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_23
buf97 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf99 = reinterpret_tensor(buf97, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf97
buf100 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_16[grid(256)](buf99,
primals_24, buf100, 256, 1024, num_warps=8, num_stages=1)
buf101 = extern_kernels.convolution(buf95, buf100, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf102 = buf92
del buf92
buf103 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf105 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf101, buf102,
buf103, buf105, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf106 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf101,
buf102, buf103, primals_25, primals_26, buf106, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_26
buf107 = extern_kernels.convolution(buf106, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf107, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf108 = buf103
del buf103
buf109 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf107, buf108,
buf109, buf111, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf112 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf107,
buf108, buf109, primals_28, primals_29, buf112, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_29
buf114 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf116 = reinterpret_tensor(buf114, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf114
buf117 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf116,
primals_30, buf117, 1024, 256, num_warps=2, num_stages=1)
buf118 = extern_kernels.convolution(buf112, buf117, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf119 = buf118
del buf118
triton_poi_fused_add_17[grid(1048576)](buf119, buf90, 1048576,
XBLOCK=1024, num_warps=4, num_stages=1)
buf120 = buf109
del buf109
buf121 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf123 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_14[grid(128)](buf119, buf120,
buf121, buf123, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf124 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384,
1024), torch.float32)
triton_poi_fused_native_group_norm_relu_15[grid(1048576)](buf119,
buf120, buf121, primals_31, primals_32, buf124, 1048576, XBLOCK
=1024, num_warps=4, num_stages=1)
del primals_32
buf126 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf128 = reinterpret_tensor(buf126, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf126
buf129 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_16[grid(256)](buf128,
primals_33, buf129, 256, 1024, num_warps=8, num_stages=1)
buf130 = extern_kernels.convolution(buf124, buf129, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf131 = buf121
del buf121
buf132 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf134 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf130, buf131,
buf132, buf134, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf135 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf130,
buf131, buf132, primals_34, primals_35, buf135, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_35
buf136 = extern_kernels.convolution(buf135, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf137 = buf132
del buf132
buf138 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf140 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf136, buf137,
buf138, buf140, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf141 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf136,
buf137, buf138, primals_37, primals_38, buf141, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_38
buf143 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf145 = reinterpret_tensor(buf143, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf143
buf146 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf145,
primals_39, buf146, 1024, 256, num_warps=2, num_stages=1)
buf147 = extern_kernels.convolution(buf141, buf146, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf148 = buf147
del buf147
triton_poi_fused_add_17[grid(1048576)](buf148, buf119, 1048576,
XBLOCK=1024, num_warps=4, num_stages=1)
buf149 = buf138
del buf138
buf150 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf152 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_14[grid(128)](buf148, buf149,
buf150, buf152, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf153 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384,
1024), torch.float32)
triton_poi_fused_native_group_norm_relu_15[grid(1048576)](buf148,
buf149, buf150, primals_40, primals_41, buf153, 1048576, XBLOCK
=1024, num_warps=4, num_stages=1)
del primals_41
buf155 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf157 = reinterpret_tensor(buf155, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf155
buf158 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_18[grid(2048)](buf157,
primals_42, buf158, 2048, 1024, num_warps=8, num_stages=1)
buf159 = extern_kernels.convolution(buf153, buf158, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf161 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf163 = reinterpret_tensor(buf161, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf161
buf164 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(512)](buf163,
primals_43, buf164, 512, 1024, num_warps=8, num_stages=1)
buf165 = extern_kernels.convolution(buf153, buf164, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 512, 16, 16), (131072, 1, 8192, 512))
buf166 = buf150
del buf150
buf167 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf169 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_20[grid(128)](buf165, buf166,
buf167, buf169, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf170 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512
), torch.float32)
triton_poi_fused_native_group_norm_relu_21[grid(524288)](buf165,
buf166, buf167, primals_44, primals_45, buf170, 524288, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_45
buf171 = extern_kernels.convolution(buf170, buf6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf172 = buf167
del buf167
buf173 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf175 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_22[grid(128)](buf171, buf172,
buf173, buf175, 128, 1024, num_warps=8, num_stages=1)
buf176 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(131072)](buf171,
buf172, buf173, primals_47, primals_48, buf176, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_48
buf178 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf180 = reinterpret_tensor(buf178, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf178
buf181 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_24[grid(2048)](buf180,
primals_49, buf181, 2048, 512, num_warps=4, num_stages=1)
buf182 = extern_kernels.convolution(buf176, buf181, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf182, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf183 = buf159
del buf159
triton_poi_fused_add_25[grid(524288)](buf183, buf182, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
buf184 = buf173
del buf173
buf185 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_26[grid(128)](buf183, buf184,
buf185, buf187, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf188 = buf182
del buf182
triton_poi_fused_native_group_norm_relu_27[grid(524288)](buf183,
buf184, buf185, primals_50, primals_51, buf188, 524288, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_51
buf190 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf192 = reinterpret_tensor(buf190, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf190
buf193 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_28[grid(512)](buf192,
primals_52, buf193, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf194 = extern_kernels.convolution(buf188, buf193, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf194, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf195 = buf185
del buf185
buf196 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_22[grid(128)](buf194, buf195,
buf196, buf198, 128, 1024, num_warps=8, num_stages=1)
buf199 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(131072)](buf194,
buf195, buf196, primals_53, primals_54, buf199, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_54
buf200 = extern_kernels.convolution(buf199, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf200, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf201 = buf196
del buf196
buf202 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf204 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_22[grid(128)](buf200, buf201,
buf202, buf204, 128, 1024, num_warps=8, num_stages=1)
buf205 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(131072)](buf200,
buf201, buf202, primals_56, primals_57, buf205, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_57
buf207 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf209 = reinterpret_tensor(buf207, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf207
buf210 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_24[grid(2048)](buf209,
primals_58, buf210, 2048, 512, num_warps=4, num_stages=1)
buf211 = extern_kernels.convolution(buf205, buf210, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf211, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf212 = buf211
del buf211
triton_poi_fused_add_29[grid(524288)](buf212, buf183, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
buf213 = buf202
del buf202
buf214 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf216 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_26[grid(128)](buf212, buf213,
buf214, buf216, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf217 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_27[grid(524288)](buf212,
buf213, buf214, primals_59, primals_60, buf217, 524288, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_60
buf219 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf221 = reinterpret_tensor(buf219, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf219
buf222 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_28[grid(512)](buf221,
primals_61, buf222, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf223 = extern_kernels.convolution(buf217, buf222, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf223, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf224 = buf214
del buf214
buf225 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf227 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_22[grid(128)](buf223, buf224,
buf225, buf227, 128, 1024, num_warps=8, num_stages=1)
buf228 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(131072)](buf223,
buf224, buf225, primals_62, primals_63, buf228, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_63
buf229 = extern_kernels.convolution(buf228, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf229, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf230 = buf225
del buf225
buf231 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf233 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_22[grid(128)](buf229, buf230,
buf231, buf233, 128, 1024, num_warps=8, num_stages=1)
buf234 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(131072)](buf229,
buf230, buf231, primals_65, primals_66, buf234, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_66
buf236 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf238 = reinterpret_tensor(buf236, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf236
buf239 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_24[grid(2048)](buf238,
primals_67, buf239, 2048, 512, num_warps=4, num_stages=1)
buf240 = extern_kernels.convolution(buf234, buf239, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf240, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf241 = buf240
del buf240
triton_poi_fused_add_29[grid(524288)](buf241, buf212, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
buf242 = buf231
del buf231
buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_26[grid(128)](buf241, buf242,
buf243, buf245, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_27[grid(524288)](buf241,
buf242, buf243, primals_68, primals_69, buf246, 524288, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_69
buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf248
buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_28[grid(512)](buf250,
primals_70, buf251, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf253 = buf243
del buf243
buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_22[grid(128)](buf252, buf253,
buf254, buf256, 128, 1024, num_warps=8, num_stages=1)
buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(131072)](buf252,
buf253, buf254, primals_71, primals_72, buf257, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_72
buf258 = extern_kernels.convolution(buf257, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf258, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf259 = buf254
del buf254
buf260 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf262 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_22[grid(128)](buf258, buf259,
buf260, buf262, 128, 1024, num_warps=8, num_stages=1)
buf263 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(131072)](buf258,
buf259, buf260, primals_74, primals_75, buf263, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_75
buf265 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf267 = reinterpret_tensor(buf265, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf265
buf268 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_24[grid(2048)](buf267,
primals_76, buf268, 2048, 512, num_warps=4, num_stages=1)
buf269 = extern_kernels.convolution(buf263, buf268, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf269, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf270 = buf269
del buf269
triton_poi_fused_add_29[grid(524288)](buf270, buf241, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
buf271 = buf260
del buf260
buf272 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf274 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_26[grid(128)](buf270, buf271,
buf272, buf274, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf275 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_27[grid(524288)](buf270,
buf271, buf272, primals_77, primals_78, buf275, 524288, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_78
buf277 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf279 = reinterpret_tensor(buf277, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf277
buf280 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(4096)](buf279,
primals_79, buf280, 4096, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf281 = extern_kernels.convolution(buf275, buf280, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf281, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf283 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf285 = reinterpret_tensor(buf283, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf283
buf286 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_31[grid(1024)](buf285,
primals_80, buf286, 1024, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf287 = extern_kernels.convolution(buf275, buf286, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf287, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf288 = buf272
del buf272
buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf291 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_32[grid(128)](buf287, buf288,
buf289, buf291, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf292 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_33[grid(262144)](buf287,
buf288, buf289, primals_81, primals_82, buf292, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_82
buf293 = extern_kernels.convolution(buf292, buf10, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf293, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf294 = buf289
del buf289
buf295 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf297 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_34[grid(128)](buf293, buf294,
buf295, buf297, 128, 512, num_warps=4, num_stages=1)
buf298 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(65536)](buf293,
buf294, buf295, primals_84, primals_85, buf298, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_85
buf300 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf302 = reinterpret_tensor(buf300, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf300
buf303 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_36[grid(4096)](buf302,
primals_86, buf303, 4096, 1024, num_warps=8, num_stages=1)
buf304 = extern_kernels.convolution(buf298, buf303, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf304, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf305 = buf281
del buf281
triton_poi_fused_add_37[grid(262144)](buf305, buf304, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf306 = buf295
del buf295
buf307 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_38[grid(128)](buf305, buf306,
buf307, buf309, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf310 = buf304
del buf304
triton_poi_fused_native_group_norm_relu_39[grid(262144)](buf305,
buf306, buf307, primals_87, primals_88, buf310, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_88
buf312 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf314 = reinterpret_tensor(buf312, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf312
buf315 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_40[grid(1024)](buf314,
primals_89, buf315, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf316 = extern_kernels.convolution(buf310, buf315, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf316, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf317 = buf307
del buf307
buf318 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf320 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_34[grid(128)](buf316, buf317,
buf318, buf320, 128, 512, num_warps=4, num_stages=1)
buf321 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(65536)](buf316,
buf317, buf318, primals_90, primals_91, buf321, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_91
buf322 = extern_kernels.convolution(buf321, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf322, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf323 = buf318
del buf318
buf324 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf326 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_34[grid(128)](buf322, buf323,
buf324, buf326, 128, 512, num_warps=4, num_stages=1)
buf327 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(65536)](buf322,
buf323, buf324, primals_93, primals_94, buf327, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_94
buf329 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf331 = reinterpret_tensor(buf329, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf329
buf332 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_36[grid(4096)](buf331,
primals_95, buf332, 4096, 1024, num_warps=8, num_stages=1)
buf333 = extern_kernels.convolution(buf327, buf332, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf333, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf334 = buf333
del buf333
triton_poi_fused_add_41[grid(262144)](buf334, buf305, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf335 = buf324
del buf324
buf336 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf338 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_38[grid(128)](buf334, buf335,
buf336, buf338, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf339 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(262144)](buf334,
buf335, buf336, primals_96, primals_97, buf339, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_97
buf341 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf343 = reinterpret_tensor(buf341, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf341
buf344 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_40[grid(1024)](buf343,
primals_98, buf344, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf345 = extern_kernels.convolution(buf339, buf344, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf345, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf346 = buf336
del buf336
buf347 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf349 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_34[grid(128)](buf345, buf346,
buf347, buf349, 128, 512, num_warps=4, num_stages=1)
buf350 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(65536)](buf345,
buf346, buf347, primals_99, primals_100, buf350, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_100
buf351 = extern_kernels.convolution(buf350, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf351, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf352 = buf347
del buf347
buf353 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf355 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_34[grid(128)](buf351, buf352,
buf353, buf355, 128, 512, num_warps=4, num_stages=1)
buf356 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(65536)](buf351,
buf352, buf353, primals_102, primals_103, buf356, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_103
buf358 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf360 = reinterpret_tensor(buf358, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf358
buf361 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_36[grid(4096)](buf360,
primals_104, buf361, 4096, 1024, num_warps=8, num_stages=1)
buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf362, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf363 = buf362
del buf362
triton_poi_fused_add_41[grid(262144)](buf363, buf334, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf364 = buf353
del buf353
buf365 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf367 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_38[grid(128)](buf363, buf364,
buf365, buf367, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf368 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(262144)](buf363,
buf364, buf365, primals_105, primals_106, buf368, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_106
buf370 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf372 = reinterpret_tensor(buf370, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf370
buf373 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_40[grid(1024)](buf372,
primals_107, buf373, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf374 = extern_kernels.convolution(buf368, buf373, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf374, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf375 = buf365
del buf365
buf376 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf378 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_34[grid(128)](buf374, buf375,
buf376, buf378, 128, 512, num_warps=4, num_stages=1)
buf379 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(65536)](buf374,
buf375, buf376, primals_108, primals_109, buf379, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_109
buf380 = extern_kernels.convolution(buf379, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf380, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf381 = buf376
del buf376
buf382 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf384 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_34[grid(128)](buf380, buf381,
buf382, buf384, 128, 512, num_warps=4, num_stages=1)
buf385 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(65536)](buf380,
buf381, buf382, primals_111, primals_112, buf385, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_112
buf387 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf389 = reinterpret_tensor(buf387, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf387
buf390 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_36[grid(4096)](buf389,
primals_113, buf390, 4096, 1024, num_warps=8, num_stages=1)
buf391 = extern_kernels.convolution(buf385, buf390, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf391, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf392 = buf391
del buf391
triton_poi_fused_add_41[grid(262144)](buf392, buf363, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf393 = buf382
del buf382
buf394 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf396 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_38[grid(128)](buf392, buf393,
buf394, buf396, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf397 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(262144)](buf392,
buf393, buf394, primals_114, primals_115, buf397, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_115
buf399 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf401 = reinterpret_tensor(buf399, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf399
buf402 = empty_strided_cuda((8192, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_42[grid(8192)](buf401,
primals_116, buf402, 8192, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf403 = extern_kernels.convolution(buf397, buf402, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf403, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf405 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf407 = reinterpret_tensor(buf405, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf405
buf408 = empty_strided_cuda((2048, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(2048)](buf407,
primals_117, buf408, 2048, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf409 = extern_kernels.convolution(buf397, buf408, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf409, (4, 2048, 4, 4), (32768, 1, 8192, 2048))
buf410 = buf394
del buf394
buf411 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf413 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_44[grid(128)](buf409, buf410,
buf411, buf413, 128, 1024, num_warps=8, num_stages=1)
buf414 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_45[grid(131072)](buf409,
buf410, buf411, primals_118, primals_119, buf414, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_119
buf415 = extern_kernels.convolution(buf414, buf14, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf415, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf416 = buf411
del buf411
buf417 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_46[grid(128)](buf415, buf416,
buf417, buf419, 128, 256, num_warps=2, num_stages=1)
buf420 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_47[grid(32768)](buf415,
buf416, buf417, primals_121, primals_122, buf420, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_122
buf422 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf424 = reinterpret_tensor(buf422, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf422
buf425 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_48[grid(8192)](buf424,
primals_123, buf425, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf426 = extern_kernels.convolution(buf420, buf425, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf426, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf427 = buf403
del buf403
triton_poi_fused_add_49[grid(131072)](buf427, buf426, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf428 = buf417
del buf417
buf429 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf431 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf427, buf428,
buf429, buf431, 128, 1024, num_warps=8, num_stages=1)
buf432 = buf426
del buf426
triton_poi_fused_native_group_norm_relu_51[grid(131072)](buf427,
buf428, buf429, primals_124, primals_125, buf432, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_125
buf434 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf436 = reinterpret_tensor(buf434, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf434
buf437 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192,
8192), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(2048)](buf436,
primals_126, buf437, 2048, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf438 = extern_kernels.convolution(buf432, buf437, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf438, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf439 = buf429
del buf429
buf440 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf442 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_46[grid(128)](buf438, buf439,
buf440, buf442, 128, 256, num_warps=2, num_stages=1)
buf443 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_47[grid(32768)](buf438,
buf439, buf440, primals_127, primals_128, buf443, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_128
buf444 = extern_kernels.convolution(buf443, buf15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf444, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf445 = buf440
del buf440
buf446 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf448 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_46[grid(128)](buf444, buf445,
buf446, buf448, 128, 256, num_warps=2, num_stages=1)
buf449 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_47[grid(32768)](buf444,
buf445, buf446, primals_130, primals_131, buf449, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_131
buf451 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf453 = reinterpret_tensor(buf451, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf451
buf454 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_48[grid(8192)](buf453,
primals_132, buf454, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf455 = extern_kernels.convolution(buf449, buf454, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf455, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf456 = buf455
del buf455
triton_poi_fused_add_53[grid(131072)](buf456, buf427, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf457 = buf446
del buf446
buf458 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf460 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf456, buf457,
buf458, buf460, 128, 1024, num_warps=8, num_stages=1)
buf461 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192
), torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(131072)](buf456,
buf457, buf458, primals_133, primals_134, buf461, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_134
buf463 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf465 = reinterpret_tensor(buf463, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf463
buf466 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192,
8192), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(2048)](buf465,
primals_135, buf466, 2048, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf467 = extern_kernels.convolution(buf461, buf466, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf467, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf468 = buf458
del buf458
buf469 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf471 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_46[grid(128)](buf467, buf468,
buf469, buf471, 128, 256, num_warps=2, num_stages=1)
buf472 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_47[grid(32768)](buf467,
buf468, buf469, primals_136, primals_137, buf472, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_137
buf473 = extern_kernels.convolution(buf472, buf16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf473, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf474 = buf469
del buf469
buf475 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf477 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_46[grid(128)](buf473, buf474,
buf475, buf477, 128, 256, num_warps=2, num_stages=1)
buf478 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_47[grid(32768)](buf473,
buf474, buf475, primals_139, primals_140, buf478, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_140
buf480 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf482 = reinterpret_tensor(buf480, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf480
buf483 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_48[grid(8192)](buf482,
primals_141, buf483, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf484 = extern_kernels.convolution(buf478, buf483, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf484, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf485 = buf484
del buf484
triton_poi_fused_add_53[grid(131072)](buf485, buf456, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf486 = buf475
del buf475
buf487 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf489 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf485, buf486,
buf487, buf489, 128, 1024, num_warps=8, num_stages=1)
buf490 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192
), torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(131072)](buf485,
buf486, buf487, primals_142, primals_143, buf490, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_143
buf492 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf494 = reinterpret_tensor(buf492, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf492
buf495 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192,
8192), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(2048)](buf494,
primals_144, buf495, 2048, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf496 = extern_kernels.convolution(buf490, buf495, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf496, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf497 = buf487
del buf487
buf498 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf500 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_46[grid(128)](buf496, buf497,
buf498, buf500, 128, 256, num_warps=2, num_stages=1)
buf501 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_47[grid(32768)](buf496,
buf497, buf498, primals_145, primals_146, buf501, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_146
buf502 = extern_kernels.convolution(buf501, buf17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf502, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf503 = buf498
del buf498
buf504 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf506 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_46[grid(128)](buf502, buf503,
buf504, buf506, 128, 256, num_warps=2, num_stages=1)
buf507 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_47[grid(32768)](buf502,
buf503, buf504, primals_148, primals_149, buf507, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_149
buf509 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf511 = reinterpret_tensor(buf509, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf509
buf512 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_48[grid(8192)](buf511,
primals_150, buf512, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf513 = extern_kernels.convolution(buf507, buf512, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf513, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf514 = buf513
del buf513
triton_poi_fused_add_53[grid(131072)](buf514, buf485, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf515 = reinterpret_tensor(buf504, (4, 32, 1, 1), (32, 1, 32, 32), 0)
del buf504
buf516 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf518 = reinterpret_tensor(buf516, (4, 32, 1, 1), (32, 1, 32, 32), 0)
del buf516
triton_per_fused_native_group_norm_54[grid(128)](buf518, buf514,
buf515, 128, 1024, num_warps=8, num_stages=1)
buf519 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192),
torch.float32)
triton_poi_fused_mean_native_group_norm_relu_55[grid(32768)](buf514,
buf515, buf518, primals_151, primals_152, buf519, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
buf520 = extern_kernels.convolution(buf519, primals_153, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf520, (4, 21843, 1, 1), (21843, 1, 21843, 21843))
buf521 = reinterpret_tensor(buf520, (4, 21843, 1, 1), (21843, 1,
87372, 87372), 0)
del buf520
triton_poi_fused_convolution_56[grid(87372)](buf521, primals_154,
87372, XBLOCK=512, num_warps=8, num_stages=1)
del primals_154
return (reinterpret_tensor(buf521, (4, 21843), (21843, 1), 0), buf0,
buf1, primals_3, primals_5, primals_6, primals_7, buf2, primals_10,
primals_12, primals_13, primals_15, primals_16, buf3, primals_19,
primals_21, primals_22, primals_24, primals_25, buf4, primals_28,
primals_30, primals_31, primals_33, primals_34, buf5, primals_37,
primals_39, primals_40, primals_42, primals_43, primals_44, buf6,
primals_47, primals_49, primals_50, primals_52, primals_53, buf7,
primals_56, primals_58, primals_59, primals_61, primals_62, buf8,
primals_65, primals_67, primals_68, primals_70, primals_71, buf9,
primals_74, primals_76, primals_77, primals_79, primals_80,
primals_81, buf10, primals_84, primals_86, primals_87, primals_89,
primals_90, buf11, primals_93, primals_95, primals_96, primals_98,
primals_99, buf12, primals_102, primals_104, primals_105,
primals_107, primals_108, buf13, primals_111, primals_113,
primals_114, primals_116, primals_117, primals_118, buf14,
primals_121, primals_123, primals_124, primals_126, primals_127,
buf15, primals_130, primals_132, primals_133, primals_135,
primals_136, buf16, primals_139, primals_141, primals_142,
primals_144, primals_145, buf17, primals_148, primals_150,
primals_151, primals_152, primals_153, buf21, buf22, buf24, buf25,
buf26, reinterpret_tensor(buf27, (4, 32), (32, 1), 0),
reinterpret_tensor(buf30, (4, 32), (32, 1), 0), buf31, buf35, buf36,
buf41, buf42, buf43, reinterpret_tensor(buf44, (4, 32), (32, 1), 0),
reinterpret_tensor(buf47, (4, 32), (32, 1), 0), buf48, buf49,
reinterpret_tensor(buf50, (4, 32), (32, 1), 0), reinterpret_tensor(
buf53, (4, 32), (32, 1), 0), buf54, buf58, buf59, buf61,
reinterpret_tensor(buf62, (4, 32), (32, 1), 0), reinterpret_tensor(
buf65, (4, 32), (32, 1), 0), buf66, buf70, buf71, buf72,
reinterpret_tensor(buf73, (4, 32), (32, 1), 0), reinterpret_tensor(
buf76, (4, 32), (32, 1), 0), buf77, buf78, reinterpret_tensor(buf79,
(4, 32), (32, 1), 0), reinterpret_tensor(buf82, (4, 32), (32, 1), 0
), buf83, buf87, buf88, buf90, reinterpret_tensor(buf91, (4, 32), (
32, 1), 0), reinterpret_tensor(buf94, (4, 32), (32, 1), 0), buf95,
buf99, buf100, buf101, reinterpret_tensor(buf102, (4, 32), (32, 1),
0), reinterpret_tensor(buf105, (4, 32), (32, 1), 0), buf106, buf107,
reinterpret_tensor(buf108, (4, 32), (32, 1), 0), reinterpret_tensor
(buf111, (4, 32), (32, 1), 0), buf112, buf116, buf117, buf119,
reinterpret_tensor(buf120, (4, 32), (32, 1), 0), reinterpret_tensor
(buf123, (4, 32), (32, 1), 0), buf124, buf128, buf129, buf130,
reinterpret_tensor(buf131, (4, 32), (32, 1), 0), reinterpret_tensor
(buf134, (4, 32), (32, 1), 0), buf135, buf136, reinterpret_tensor(
buf137, (4, 32), (32, 1), 0), reinterpret_tensor(buf140, (4, 32), (
32, 1), 0), buf141, buf145, buf146, buf148, reinterpret_tensor(
buf149, (4, 32), (32, 1), 0), reinterpret_tensor(buf152, (4, 32), (
32, 1), 0), buf153, buf157, buf158, buf163, buf164, buf165,
reinterpret_tensor(buf166, (4, 32), (32, 1), 0), reinterpret_tensor
(buf169, (4, 32), (32, 1), 0), buf170, buf171, reinterpret_tensor(
buf172, (4, 32), (32, 1), 0), reinterpret_tensor(buf175, (4, 32), (
32, 1), 0), buf176, buf180, buf181, buf183, reinterpret_tensor(
buf184, (4, 32), (32, 1), 0), reinterpret_tensor(buf187, (4, 32), (
32, 1), 0), buf188, buf192, buf193, buf194, reinterpret_tensor(
buf195, (4, 32), (32, 1), 0), reinterpret_tensor(buf198, (4, 32), (
32, 1), 0), buf199, buf200, reinterpret_tensor(buf201, (4, 32), (32,
1), 0), reinterpret_tensor(buf204, (4, 32), (32, 1), 0), buf205,
buf209, buf210, buf212, reinterpret_tensor(buf213, (4, 32), (32, 1),
0), reinterpret_tensor(buf216, (4, 32), (32, 1), 0), buf217, buf221,
buf222, buf223, reinterpret_tensor(buf224, (4, 32), (32, 1), 0),
reinterpret_tensor(buf227, (4, 32), (32, 1), 0), buf228, buf229,
reinterpret_tensor(buf230, (4, 32), (32, 1), 0), reinterpret_tensor
(buf233, (4, 32), (32, 1), 0), buf234, buf238, buf239, buf241,
reinterpret_tensor(buf242, (4, 32), (32, 1), 0), reinterpret_tensor
(buf245, (4, 32), (32, 1), 0), buf246, buf250, buf251, buf252,
reinterpret_tensor(buf253, (4, 32), (32, 1), 0), reinterpret_tensor
(buf256, (4, 32), (32, 1), 0), buf257, buf258, reinterpret_tensor(
buf259, (4, 32), (32, 1), 0), reinterpret_tensor(buf262, (4, 32), (
32, 1), 0), buf263, buf267, buf268, buf270, reinterpret_tensor(
buf271, (4, 32), (32, 1), 0), reinterpret_tensor(buf274, (4, 32), (
32, 1), 0), buf275, buf279, buf280, buf285, buf286, buf287,
reinterpret_tensor(buf288, (4, 32), (32, 1), 0), reinterpret_tensor
(buf291, (4, 32), (32, 1), 0), buf292, buf293, reinterpret_tensor(
buf294, (4, 32), (32, 1), 0), reinterpret_tensor(buf297, (4, 32), (
32, 1), 0), buf298, buf302, buf303, buf305, reinterpret_tensor(
buf306, (4, 32), (32, 1), 0), reinterpret_tensor(buf309, (4, 32), (
32, 1), 0), buf310, buf314, buf315, buf316, reinterpret_tensor(
buf317, (4, 32), (32, 1), 0), reinterpret_tensor(buf320, (4, 32), (
32, 1), 0), buf321, buf322, reinterpret_tensor(buf323, (4, 32), (32,
1), 0), reinterpret_tensor(buf326, (4, 32), (32, 1), 0), buf327,
buf331, buf332, buf334, reinterpret_tensor(buf335, (4, 32), (32, 1),
0), reinterpret_tensor(buf338, (4, 32), (32, 1), 0), buf339, buf343,
buf344, buf345, reinterpret_tensor(buf346, (4, 32), (32, 1), 0),
reinterpret_tensor(buf349, (4, 32), (32, 1), 0), buf350, buf351,
reinterpret_tensor(buf352, (4, 32), (32, 1), 0), reinterpret_tensor
(buf355, (4, 32), (32, 1), 0), buf356, buf360, buf361, buf363,
reinterpret_tensor(buf364, (4, 32), (32, 1), 0), reinterpret_tensor
(buf367, (4, 32), (32, 1), 0), buf368, buf372, buf373, buf374,
reinterpret_tensor(buf375, (4, 32), (32, 1), 0), reinterpret_tensor
(buf378, (4, 32), (32, 1), 0), buf379, buf380, reinterpret_tensor(
buf381, (4, 32), (32, 1), 0), reinterpret_tensor(buf384, (4, 32), (
32, 1), 0), buf385, buf389, buf390, buf392, reinterpret_tensor(
buf393, (4, 32), (32, 1), 0), reinterpret_tensor(buf396, (4, 32), (
32, 1), 0), buf397, buf401, buf402, buf407, buf408, buf409,
reinterpret_tensor(buf410, (4, 32), (32, 1), 0), reinterpret_tensor
(buf413, (4, 32), (32, 1), 0), buf414, buf415, reinterpret_tensor(
buf416, (4, 32), (32, 1), 0), reinterpret_tensor(buf419, (4, 32), (
32, 1), 0), buf420, buf424, buf425, buf427, reinterpret_tensor(
buf428, (4, 32), (32, 1), 0), reinterpret_tensor(buf431, (4, 32), (
32, 1), 0), buf432, buf436, buf437, buf438, reinterpret_tensor(
buf439, (4, 32), (32, 1), 0), reinterpret_tensor(buf442, (4, 32), (
32, 1), 0), buf443, buf444, reinterpret_tensor(buf445, (4, 32), (32,
1), 0), reinterpret_tensor(buf448, (4, 32), (32, 1), 0), buf449,
buf453, buf454, buf456, reinterpret_tensor(buf457, (4, 32), (32, 1),
0), reinterpret_tensor(buf460, (4, 32), (32, 1), 0), buf461, buf465,
buf466, buf467, reinterpret_tensor(buf468, (4, 32), (32, 1), 0),
reinterpret_tensor(buf471, (4, 32), (32, 1), 0), buf472, buf473,
reinterpret_tensor(buf474, (4, 32), (32, 1), 0), reinterpret_tensor
(buf477, (4, 32), (32, 1), 0), buf478, buf482, buf483, buf485,
reinterpret_tensor(buf486, (4, 32), (32, 1), 0), reinterpret_tensor
(buf489, (4, 32), (32, 1), 0), buf490, buf494, buf495, buf496,
reinterpret_tensor(buf497, (4, 32), (32, 1), 0), reinterpret_tensor
(buf500, (4, 32), (32, 1), 0), buf501, buf502, reinterpret_tensor(
buf503, (4, 32), (32, 1), 0), reinterpret_tensor(buf506, (4, 32), (
32, 1), 0), buf507, buf511, buf512, buf514, buf515, buf518, buf519)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
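# Editor's sketch (not from the source repo): tf2th maps TensorFlow's HWIO
# kernel layout to PyTorch's OIHW layout via transpose([3, 2, 0, 1]).
def _demo_tf2th():
    import numpy as np
    w_hwio = np.zeros((3, 3, 16, 32), dtype=np.float32)  # (H, W, In, Out)
    w_oihw = tf2th(w_hwio)
    assert tuple(w_oihw.shape) == (32, 16, 3, 3)  # (Out, In, H, W)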
class StdConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization: the kernel is normalized to zero
    mean and unit variance per output filter before the convolution."""

    def forward(self, x):
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-10)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
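# Editor's sketch (assumption: a CPU-only check, independent of the compiled
# Triton path): after standardization, each output filter of the effective
# weight has roughly zero mean and unit variance over its (in, kh, kw) axes.
def _demo_stdconv2d():
    conv = StdConv2d(3, 8, kernel_size=3, padding=1, bias=False)
    v, m = torch.var_mean(conv.weight, dim=[1, 2, 3], keepdim=True,
        unbiased=False)
    w = (conv.weight - m) / torch.sqrt(v + 1e-10)
    assert torch.allclose(w.mean(dim=[1, 2, 3]), torch.zeros(8), atol=1e-4)
    assert conv(torch.randn(1, 3, 16, 16)).shape == (1, 8, 16, 16)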
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
    Except it puts the stride on the 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride)
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[
f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[
f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[
f'{prefix}c/{convname}/kernel']))
self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma'])
)
self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma'])
)
self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma'])
)
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
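# Editor's sketch: a strided PreActBottleneck with cin != cout exercises the
# downsample branch; the spatial size halves and the channel count changes
# (hypothetical sizes, not taken from the traced graph above).
def _demo_preact_bottleneck():
    block = PreActBottleneck(cin=64, cout=128, stride=2)
    y = block(torch.randn(2, 64, 16, 16))
    assert y.shape == (2, 128, 8, 8)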
class ResNetV2New(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843,
zero_head=False):
super().__init__()
wf = width_factor
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, 64 *
wf, kernel_size=7, stride=2, padding=3, bias=False)), ('pad',
nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3,
stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit01', PreActBottleneck(cin=64 * wf, cout=256 *
wf, cmid=64 * wf))] + [(f'unit{i:02d}', PreActBottleneck(cin=
256 * wf, cout=256 * wf, cmid=64 * wf)) for i in range(2,
block_units[0] + 1)]))), ('block2', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=
128 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
512 * wf, cout=512 * wf, cmid=128 * wf)) for i in range(2,
block_units[1] + 1)]))), ('block3', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=
256 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
1024 * wf, cout=1024 * wf, cmid=256 * wf)) for i in range(2,
block_units[2] + 1)]))), ('block4', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid
=512 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin
=2048 * wf, cout=2048 * wf, cmid=512 * wf)) for i in range(2,
block_units[3] + 1)])))]))
self.zero_head = zero_head
self.head = nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, 2048 *
wf)), ('relu', nn.ReLU(inplace=True)), ('avg', nn.
AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d(2048 * wf,
head_size, kernel_size=1, bias=True))]))
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
self.root.conv.weight.copy_(tf2th(weights[
f'{prefix}root_block/standardized_conv2d/kernel']))
self.head.gn.weight.copy_(tf2th(weights[
f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(tf2th(weights[
f'{prefix}head/conv2d/kernel']))
self.head.conv.bias.copy_(tf2th(weights[
f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
def forward(self, input_0):
primals_1 = self.root.conv.weight
primals_3 = self.body.block1.unit01.gn1.weight
primals_4 = self.body.block1.unit01.gn1.bias
primals_6 = self.body.block1.unit01.conv1.weight
primals_7 = self.body.block1.unit01.gn2.weight
primals_8 = self.body.block1.unit01.gn2.bias
primals_9 = self.body.block1.unit01.conv2.weight
primals_10 = self.body.block1.unit01.gn3.weight
primals_11 = self.body.block1.unit01.gn3.bias
primals_5 = self.body.block1.unit01.conv3.weight
primals_12 = self.body.block1.unit01.downsample.weight
primals_13 = self.body.block1.unit02.gn1.weight
primals_14 = self.body.block1.unit02.gn1.bias
primals_15 = self.body.block1.unit02.conv1.weight
primals_16 = self.body.block1.unit02.gn2.weight
primals_17 = self.body.block1.unit02.gn2.bias
primals_18 = self.body.block1.unit02.conv2.weight
primals_19 = self.body.block1.unit02.gn3.weight
primals_20 = self.body.block1.unit02.gn3.bias
primals_21 = self.body.block1.unit02.conv3.weight
primals_22 = self.body.block1.unit03.gn1.weight
primals_23 = self.body.block1.unit03.gn1.bias
primals_24 = self.body.block1.unit03.conv1.weight
primals_25 = self.body.block1.unit03.gn2.weight
primals_26 = self.body.block1.unit03.gn2.bias
primals_27 = self.body.block1.unit03.conv2.weight
primals_28 = self.body.block1.unit03.gn3.weight
primals_29 = self.body.block1.unit03.gn3.bias
primals_30 = self.body.block1.unit03.conv3.weight
primals_31 = self.body.block1.unit04.gn1.weight
primals_32 = self.body.block1.unit04.gn1.bias
primals_33 = self.body.block1.unit04.conv1.weight
primals_34 = self.body.block1.unit04.gn2.weight
primals_35 = self.body.block1.unit04.gn2.bias
primals_36 = self.body.block1.unit04.conv2.weight
primals_37 = self.body.block1.unit04.gn3.weight
primals_38 = self.body.block1.unit04.gn3.bias
primals_39 = self.body.block1.unit04.conv3.weight
primals_40 = self.body.block2.unit01.gn1.weight
primals_41 = self.body.block2.unit01.gn1.bias
primals_43 = self.body.block2.unit01.conv1.weight
primals_44 = self.body.block2.unit01.gn2.weight
primals_45 = self.body.block2.unit01.gn2.bias
primals_46 = self.body.block2.unit01.conv2.weight
primals_47 = self.body.block2.unit01.gn3.weight
primals_48 = self.body.block2.unit01.gn3.bias
primals_49 = self.body.block2.unit01.conv3.weight
primals_42 = self.body.block2.unit01.downsample.weight
primals_50 = self.body.block2.unit02.gn1.weight
primals_51 = self.body.block2.unit02.gn1.bias
primals_52 = self.body.block2.unit02.conv1.weight
primals_53 = self.body.block2.unit02.gn2.weight
primals_54 = self.body.block2.unit02.gn2.bias
primals_55 = self.body.block2.unit02.conv2.weight
primals_56 = self.body.block2.unit02.gn3.weight
primals_57 = self.body.block2.unit02.gn3.bias
primals_58 = self.body.block2.unit02.conv3.weight
primals_59 = self.body.block2.unit03.gn1.weight
primals_60 = self.body.block2.unit03.gn1.bias
primals_61 = self.body.block2.unit03.conv1.weight
primals_62 = self.body.block2.unit03.gn2.weight
primals_63 = self.body.block2.unit03.gn2.bias
primals_64 = self.body.block2.unit03.conv2.weight
primals_65 = self.body.block2.unit03.gn3.weight
primals_66 = self.body.block2.unit03.gn3.bias
primals_67 = self.body.block2.unit03.conv3.weight
primals_68 = self.body.block2.unit04.gn1.weight
primals_69 = self.body.block2.unit04.gn1.bias
primals_70 = self.body.block2.unit04.conv1.weight
primals_71 = self.body.block2.unit04.gn2.weight
primals_72 = self.body.block2.unit04.gn2.bias
primals_73 = self.body.block2.unit04.conv2.weight
primals_74 = self.body.block2.unit04.gn3.weight
primals_75 = self.body.block2.unit04.gn3.bias
primals_76 = self.body.block2.unit04.conv3.weight
primals_77 = self.body.block3.unit01.gn1.weight
primals_78 = self.body.block3.unit01.gn1.bias
primals_80 = self.body.block3.unit01.conv1.weight
primals_81 = self.body.block3.unit01.gn2.weight
primals_82 = self.body.block3.unit01.gn2.bias
primals_83 = self.body.block3.unit01.conv2.weight
primals_84 = self.body.block3.unit01.gn3.weight
primals_85 = self.body.block3.unit01.gn3.bias
primals_86 = self.body.block3.unit01.conv3.weight
primals_79 = self.body.block3.unit01.downsample.weight
primals_87 = self.body.block3.unit02.gn1.weight
primals_88 = self.body.block3.unit02.gn1.bias
primals_89 = self.body.block3.unit02.conv1.weight
primals_90 = self.body.block3.unit02.gn2.weight
primals_91 = self.body.block3.unit02.gn2.bias
primals_92 = self.body.block3.unit02.conv2.weight
primals_93 = self.body.block3.unit02.gn3.weight
primals_94 = self.body.block3.unit02.gn3.bias
primals_95 = self.body.block3.unit02.conv3.weight
primals_96 = self.body.block3.unit03.gn1.weight
primals_97 = self.body.block3.unit03.gn1.bias
primals_98 = self.body.block3.unit03.conv1.weight
primals_99 = self.body.block3.unit03.gn2.weight
primals_100 = self.body.block3.unit03.gn2.bias
primals_101 = self.body.block3.unit03.conv2.weight
primals_102 = self.body.block3.unit03.gn3.weight
primals_103 = self.body.block3.unit03.gn3.bias
primals_104 = self.body.block3.unit03.conv3.weight
primals_105 = self.body.block3.unit04.gn1.weight
primals_106 = self.body.block3.unit04.gn1.bias
primals_107 = self.body.block3.unit04.conv1.weight
primals_108 = self.body.block3.unit04.gn2.weight
primals_109 = self.body.block3.unit04.gn2.bias
primals_110 = self.body.block3.unit04.conv2.weight
primals_111 = self.body.block3.unit04.gn3.weight
primals_112 = self.body.block3.unit04.gn3.bias
primals_113 = self.body.block3.unit04.conv3.weight
primals_114 = self.body.block4.unit01.gn1.weight
primals_115 = self.body.block4.unit01.gn1.bias
primals_117 = self.body.block4.unit01.conv1.weight
primals_118 = self.body.block4.unit01.gn2.weight
primals_119 = self.body.block4.unit01.gn2.bias
primals_120 = self.body.block4.unit01.conv2.weight
primals_121 = self.body.block4.unit01.gn3.weight
primals_122 = self.body.block4.unit01.gn3.bias
primals_123 = self.body.block4.unit01.conv3.weight
primals_116 = self.body.block4.unit01.downsample.weight
primals_124 = self.body.block4.unit02.gn1.weight
primals_125 = self.body.block4.unit02.gn1.bias
primals_126 = self.body.block4.unit02.conv1.weight
primals_127 = self.body.block4.unit02.gn2.weight
primals_128 = self.body.block4.unit02.gn2.bias
primals_129 = self.body.block4.unit02.conv2.weight
primals_130 = self.body.block4.unit02.gn3.weight
primals_131 = self.body.block4.unit02.gn3.bias
primals_132 = self.body.block4.unit02.conv3.weight
primals_133 = self.body.block4.unit03.gn1.weight
primals_134 = self.body.block4.unit03.gn1.bias
primals_135 = self.body.block4.unit03.conv1.weight
primals_136 = self.body.block4.unit03.gn2.weight
primals_137 = self.body.block4.unit03.gn2.bias
primals_138 = self.body.block4.unit03.conv2.weight
primals_139 = self.body.block4.unit03.gn3.weight
primals_140 = self.body.block4.unit03.gn3.bias
primals_141 = self.body.block4.unit03.conv3.weight
primals_142 = self.body.block4.unit04.gn1.weight
primals_143 = self.body.block4.unit04.gn1.bias
primals_144 = self.body.block4.unit04.conv1.weight
primals_145 = self.body.block4.unit04.gn2.weight
primals_146 = self.body.block4.unit04.gn2.bias
primals_147 = self.body.block4.unit04.conv2.weight
primals_148 = self.body.block4.unit04.gn3.weight
primals_149 = self.body.block4.unit04.gn3.bias
primals_150 = self.body.block4.unit04.conv3.weight
primals_151 = self.head.gn.weight
primals_152 = self.head.gn.bias
primals_153 = self.head.conv.weight
primals_154 = self.head.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154])
return output[0]
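# Editor's sketch (assumptions: the traced graph above appears to use
# width_factor=4 with four units per block, and the compiled call() requires
# CUDA plus the exact input shape it was traced with, taken here as 64x64).
def _demo_resnetv2():
    model = ResNetV2New(block_units=[4, 4, 4, 4], width_factor=4).cuda()
    logits = model(torch.randn(4, 3, 64, 64, device='cuda'))
    assert logits.shape == (4, 21843)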
repo_name: HelenR6/imagenet-r
module_name: ResNetV2
synthetic: false
uuid: 14853
licenses: ["MIT"]
stars: 155
sha: 0bf04f2bf5d60d1098fc9a78f4e8c042e434eb69
repo_link: https://github.com/HelenR6/imagenet-r/tree/0bf04f2bf5d60d1098fc9a78f4e8c042e434eb69

entry_point: RMSE

original_triton_python_code:
import torch
import torch.nn as nn
class RMSE(nn.Module):
def __init__(self):
super(RMSE, self).__init__()
    def forward(self, outputs, target, *args):
        # Only pixels where both target and prediction are positive count.
        val_pixels = (target > 0).float() * (outputs > 0).float()
        err = (target * val_pixels - outputs * val_pixels) ** 2
        # Per-sample squared-error sum and valid-pixel count, then RMSE,
        # averaged over the batch and scaled by 1000.
        loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1,
            keepdim=True)
        return torch.mean(torch.sqrt(loss / cnt)) * 1000
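# Editor's note: the scale factor of 1000 suggests a unit conversion (e.g.
# metres -> millimetres, an assumption). A quick self-consistency check:
def _demo_rmse():
    crit = RMSE()
    t = torch.rand(4, 4, 4, 4)
    assert torch.allclose(crit(t, t), torch.tensor(0.0))  # zero error on t == t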
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
optimised_triton_code:
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp4 > tmp1
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tmp0 * tmp7
tmp9 = tmp4 * tmp7
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + x0, tmp15, xmask)
tl.store(out_ptr1 + x0, tmp19, xmask)
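# Editor's reading of the generated code: the kernel above reduces, for each
# batch element x0, the masked squared error (out_ptr0) and the valid-pixel
# count (out_ptr1) over the 64 = 4*4*4 flattened positions; the kernel below
# then computes mean(sqrt(sum / cnt)) * 1000 across the batch of 4.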
@triton.jit
def triton_per_fused_div_mean_mul_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 1000.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_div_mean_mul_sqrt_1[grid(1)](buf3, buf0, buf1, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class RMSENew(nn.Module):
def __init__(self):
super(RMSENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
repo_name: anglixjtu/MSG_CHN_WACV20
module_name: RMSE
synthetic: false
uuid: 14854
licenses: ["Apache-2.0"]
stars: 61
sha: 6910894cf3caed2ffde27586f96b132b0c1d1a98
repo_link: https://github.com/anglixjtu/MSG_CHN_WACV20/tree/6910894cf3caed2ffde27586f96b132b0c1d1a98

entry_point: SequenceBias

original_triton_python_code:
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from torch.nn.parameter import Parameter
class SequenceBias(nn.Module):
"""
Adds one bias element to the end of the sequence.
    So, if the input has a shape ``(L, N, E)``, where
``L`` is the sequence length, ``N`` is the batch size, and ``E`` is
the embedding dimension, the output will have a shape
``(L+1, N, E)``.
Attributes:
bias (:class:`torch.nn.parameter.Parameter`): the learnable bias of
the module of shape ``(E)``, where ``E`` is the embedding dimension.
Example:
>>> m = SequenceBias(16)
>>> input = torch.randn(20, 4, 16)
>>> output = m(input)
>>> print(output.size())
torch.Size([21, 4, 16])
"""
def __init__(self, embed_dim: 'int'):
"""
Args:
embed_dim: Embedding dimension
"""
super(SequenceBias, self).__init__()
self.bias = Parameter(torch.empty(embed_dim))
self._reset_parameters()
def _reset_parameters(self):
"""
        Assigns normally distributed random values to the bias.
"""
nn.init.normal_(self.bias)
def forward(self, x):
_, bsz, _ = x.shape
return torch.cat([x, self.bias.repeat(1, bsz, 1)])
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
optimised_triton_code:
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
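# Editor's reading: for the (4, 4, 4) input traced here, the kernel writes
# 80 = 5*4*4 elements; indices with x2 < 4 copy the input, and the trailing
# x2 == 4 slice broadcasts the 4-element bias, i.e. a fused dim-0 concat
# equivalent to torch.cat([x, bias.repeat(1, bsz, 1)]).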
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, primals_2, buf0, 80,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class SequenceBiasNew(nn.Module):
"""
Adds one bias element to the end of the sequence.
    So, if the input has a shape ``(L, N, E)``, where
``L`` is the sequence length, ``N`` is the batch size, and ``E`` is
the embedding dimension, the output will have a shape
``(L+1, N, E)``.
Attributes:
bias (:class:`torch.nn.parameter.Parameter`): the learnable bias of
the module of shape ``(E)``, where ``E`` is the embedding dimension.
Example:
>>> m = SequenceBias(16)
>>> input = torch.randn(20, 4, 16)
>>> output = m(input)
>>> print(output.size())
torch.Size([21, 4, 16])
"""
def __init__(self, embed_dim: 'int'):
"""
Args:
embed_dim: Embedding dimension
"""
super(SequenceBiasNew, self).__init__()
self.bias = Parameter(torch.empty(embed_dim))
self._reset_parameters()
def _reset_parameters(self):
"""
        Assigns normally distributed random values to the bias.
"""
nn.init.normal_(self.bias)
def forward(self, input_0):
primals_2 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
anibadde/opacus
|
SequenceBias
| false
| 14,855
|
[
"Apache-2.0"
] | 958
|
be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
https://github.com/anibadde/opacus/tree/be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
iMAE
|
import torch
import torch.nn as nn
class iMAE(nn.Module):
def __init__(self):
super(iMAE, self).__init__()
def forward(self, outputs, target, *args):
outputs = outputs / 1000.0
target = target / 1000.0
outputs[outputs == 0] = -1
target[target == 0] = -1
outputs = 1.0 / outputs
target = 1.0 / target
outputs[outputs == -1] = 0
target[target == -1] = 0
val_pixels = (target > 0).float()
err = torch.abs(target * val_pixels - outputs * val_pixels)
loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1,
keepdim=True)
return torch.mean(loss / cnt)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
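# A minimal reference sketch of the same metric in plain tensor ops, assuming
# strictly positive depth maps in millimetres (so the in-place zero/-1 edits
# above never fire): iMAE is the mean absolute error between inverse depths,
# averaged over valid pixels per sample. Helper names are illustrative.
def imae_reference(outputs, target):
    pred = 1000.0 / outputs
    gt = 1000.0 / target
    valid = (gt > 0).float()
    err = torch.abs(gt * valid - pred * valid)
    per_sample = err.flatten(1).sum(-1) / valid.flatten(1).sum(-1)
    return per_sample.mean()

def check_imae_reference():
    # clones, because iMAE.forward mutates its inputs in place
    o = torch.rand(4, 4, 4, 4) + 0.1
    t = torch.rand(4, 4, 4, 4) + 0.1
    assert torch.allclose(imae_reference(o, t), iMAE()(o.clone(), t.clone()))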
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_index_put_lift_fresh_mul_reciprocal_sum_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp13 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 0.001
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 == tmp3
tmp5 = -1.0
tmp6 = tl.where(tmp4, tmp5, tmp2)
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tmp10 == tmp5
tmp12 = tl.where(tmp11, tmp3, tmp10)
tmp14 = tmp13 * tmp1
tmp15 = tmp14 == tmp3
tmp16 = tl.where(tmp15, tmp5, tmp14)
tmp17 = tmp7 / tmp16
tmp18 = tmp17 * tmp9
tmp19 = tmp18 == tmp5
tmp20 = tl.where(tmp19, tmp3, tmp18)
tmp21 = tmp12 > tmp3
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp12 * tmp22
tmp24 = tmp20 * tmp22
tmp25 = tmp23 - tmp24
tmp26 = tl_math.abs(tmp25)
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tmp31 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp33 = tl.where(xmask, tmp31, 0)
tmp34 = tl.sum(tmp33, 1)[:, None]
tl.store(out_ptr0 + x0, tmp30, xmask)
tl.store(out_ptr1 + x0, tmp34, xmask)
@triton.jit
def triton_per_fused_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = 4.0
tmp7 = tmp5 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_div_index_put_lift_fresh_mul_reciprocal_sum_0[grid(4)
](arg1_1, arg0_1, buf4, buf5, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6
del buf6
triton_per_fused_div_mean_1[grid(1)](buf7, buf4, buf5, 1, 4, XBLOCK
=1, num_warps=2, num_stages=1)
del buf4
del buf5
return buf7,
class iMAENew(nn.Module):
def __init__(self):
super(iMAENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anglixjtu/MSG_CHN_WACV20
|
iMAE
| false
| 14,856
|
[
"Apache-2.0"
] | 61
|
6910894cf3caed2ffde27586f96b132b0c1d1a98
|
https://github.com/anglixjtu/MSG_CHN_WACV20/tree/6910894cf3caed2ffde27586f96b132b0c1d1a98
|
ResNetBlockGroupNorm
|
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ResNetBlockGroupNorm(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1, activation=
'relu'):
super(ResNetBlockGroupNorm, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.conv1 = conv3x3(inplanes, planes, stride)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.conv2 = conv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.Conv2d(inplanes, planes,
kernel_size=1, stride=stride, bias=False), nn.GroupNorm(
num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.gn1(out)
out = self.activation(out)
out = self.conv2(out)
out = self.gn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4, 'num_groups': 1}]
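# A minimal sketch of the per-group statistics the fused kernels below compute,
# assuming num_groups=1 as configured above: each sample is normalised over all
# C*H*W elements with biased variance, then scaled/shifted per channel.
def check_group_norm_reference():
    x = torch.randn(4, 4, 4, 4)
    w, b = torch.rand(4), torch.rand(4)
    mean = x.flatten(1).mean(-1)
    var = x.flatten(1).var(-1, unbiased=False)
    xn = (x - mean.view(-1, 1, 1, 1)) * torch.rsqrt(var + 1e-05).view(-1, 1, 1, 1)
    ref = xn * w.view(1, -1, 1, 1) + b.view(1, -1, 1, 1)
    out = nn.functional.group_norm(x, 1, w, b, eps=1e-05)
    assert torch.allclose(ref, out, atol=1e-06)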
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr3 + (r1 + 64 * x0), tmp33, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3,
primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid
(4)](buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12,
buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6,
buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0),
reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6,
reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor(
buf10, (4, 1), (1, 1), 0), buf12)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ResNetBlockGroupNormNew(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1, activation=
'relu'):
super(ResNetBlockGroupNormNew, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.conv1 = conv3x3(inplanes, planes, stride)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.conv2 = conv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.Conv2d(inplanes, planes,
kernel_size=1, stride=stride, bias=False), nn.GroupNorm(
num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.gn1.weight
primals_4 = self.gn1.bias
primals_5 = self.conv2.weight
primals_6 = self.gn2.weight
primals_7 = self.gn2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
andrecianflone/wolf
|
ResNetBlockGroupNorm
| false
| 14,857
|
[
"Apache-2.0"
] | 75
|
826bbedc58d4d29871110349356868066a3108e6
|
https://github.com/andrecianflone/wolf/tree/826bbedc58d4d29871110349356868066a3108e6
|
Swish
|
import torch
import torch.nn as nn
import torch.distributed
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
self.beta = nn.Parameter(torch.tensor(1.0))
def forward(self, x):
return x * torch.sigmoid(self.beta * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
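# A quick sanity sketch: with the default beta = 1 this parameterised Swish,
# x * sigmoid(beta * x), coincides with torch.nn.functional.silu.
def check_swish_matches_silu():
    x = torch.rand(4, 4, 4, 4)
    m = Swish()
    assert torch.allclose(m(x), torch.nn.functional.silu(x))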
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp2 * tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class SwishNew(nn.Module):
def __init__(self):
super(SwishNew, self).__init__()
self.beta = nn.Parameter(torch.tensor(1.0))
def forward(self, input_0):
primals_1 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
anidnerocram/PointFlow
|
Swish
| false
| 14,858
|
[
"MIT"
] | 539
|
b9f82a5534fad830c99ba0a30f4f3320626f64f4
|
https://github.com/anidnerocram/PointFlow/tree/b9f82a5534fad830c99ba0a30f4f3320626f64f4
|
iRMSE
|
import torch
import torch.nn as nn
class iRMSE(nn.Module):
def __init__(self):
super(iRMSE, self).__init__()
def forward(self, outputs, target, *args):
outputs = outputs / 1000.0
target = target / 1000.0
outputs[outputs == 0] = -1
target[target == 0] = -1
outputs = 1.0 / outputs
target = 1.0 / target
outputs[outputs == -1] = 0
target[target == -1] = 0
val_pixels = (target > 0).float()
err = (target * val_pixels - outputs * val_pixels) ** 2
loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1,
keepdim=True)
return torch.mean(torch.sqrt(loss / cnt))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
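# A minimal reference sketch, assuming strictly positive millimetre depths as
# in the iMAE entry above: per-sample RMSE between inverse depths over valid
# pixels, then averaged across the batch. Helper names are illustrative.
def irmse_reference(outputs, target):
    pred = 1000.0 / outputs
    gt = 1000.0 / target
    valid = (gt > 0).float()
    err = (gt * valid - pred * valid) ** 2
    per_sample = err.flatten(1).sum(-1) / valid.flatten(1).sum(-1)
    return per_sample.sqrt().mean()

def check_irmse_reference():
    o = torch.rand(4, 4, 4, 4) + 0.1
    t = torch.rand(4, 4, 4, 4) + 0.1
    assert torch.allclose(irmse_reference(o, t), iRMSE()(o.clone(), t.clone()))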
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_index_put_lift_fresh_mul_reciprocal_sum_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp13 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 0.001
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 == tmp3
tmp5 = -1.0
tmp6 = tl.where(tmp4, tmp5, tmp2)
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp7 / tmp6
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tmp10 == tmp5
tmp12 = tl.where(tmp11, tmp3, tmp10)
tmp14 = tmp13 * tmp1
tmp15 = tmp14 == tmp3
tmp16 = tl.where(tmp15, tmp5, tmp14)
tmp17 = tmp7 / tmp16
tmp18 = tmp17 * tmp9
tmp19 = tmp18 == tmp5
tmp20 = tl.where(tmp19, tmp3, tmp18)
tmp21 = tmp12 > tmp3
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp12 * tmp22
tmp24 = tmp20 * tmp22
tmp25 = tmp23 - tmp24
tmp26 = tmp25 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tmp31 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp33 = tl.where(xmask, tmp31, 0)
tmp34 = tl.sum(tmp33, 1)[:, None]
tl.store(out_ptr0 + x0, tmp30, xmask)
tl.store(out_ptr1 + x0, tmp34, xmask)
@triton.jit
def triton_per_fused_div_mean_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_div_index_put_lift_fresh_mul_reciprocal_sum_0[grid(4)
](arg1_1, arg0_1, buf4, buf5, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6
del buf6
triton_per_fused_div_mean_sqrt_1[grid(1)](buf7, buf4, buf5, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del buf4
del buf5
return buf7,
class iRMSENew(nn.Module):
def __init__(self):
super(iRMSENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anglixjtu/MSG_CHN_WACV20
|
iRMSE
| false
| 14,859
|
[
"Apache-2.0"
] | 61
|
6910894cf3caed2ffde27586f96b132b0c1d1a98
|
https://github.com/anglixjtu/MSG_CHN_WACV20/tree/6910894cf3caed2ffde27586f96b132b0c1d1a98
|
DPRNNCell
|
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
    When used with `PackedSequence`s, an additional attribute `max_batch_len` is defined
    to determine the size of the per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPRNNCell(DPRNNCellBase):
"""An Elman RNN cell with tanh or ReLU non-linearity.
DP-friendly drop-in replacement of the ``torch.nn.RNNCell`` module to use in ``DPRNN``.
Refer to ``torch.nn.RNNCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
nonlinearity: 'str'='tanh') ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=1)
if nonlinearity not in ('tanh', 'relu'):
raise ValueError(f'Unsupported nonlinearity: {nonlinearity}')
self.nonlinearity = nonlinearity
def forward(self, input: 'Tensor', hx: 'Optional[Tensor]'=None,
batch_size_t: 'Optional[int]'=None) ->Tensor:
if hx is None:
hx = torch.zeros(input.shape[0], self.hidden_size, dtype=input.
dtype, device=input.device)
h_prev = hx
gates = self.ih(input) + self.hh(h_prev if batch_size_t is None else
h_prev[:batch_size_t, :])
if self.nonlinearity == 'tanh':
h_t = torch.tanh(gates)
elif self.nonlinearity == 'relu':
h_t = torch.relu(gates)
else:
raise RuntimeError(f'Unknown nonlinearity: {self.nonlinearity}')
return h_t
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
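# A hedged equivalence sketch, assuming weights are copied across: with a tanh
# nonlinearity and a zero initial state the cell matches torch.nn.RNNCell,
# h_t = tanh(W_ih x + b_ih + W_hh h + b_hh). Helper name is illustrative.
def check_dprnncell_matches_rnncell():
    dp = DPRNNCell(4, 4, bias=True)
    ref = nn.RNNCell(4, 4, bias=True)
    ref.weight_ih.data.copy_(dp.ih.weight)
    ref.bias_ih.data.copy_(dp.ih.bias)
    ref.weight_hh.data.copy_(dp.hh.weight)
    ref.bias_hh.data.copy_(dp.hh.bias)
    x = torch.randn(2, 4)
    assert torch.allclose(dp(x), ref(x), atol=1e-06)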
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x4 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(256)](buf3, primals_3, buf2,
primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del primals_3
del primals_5
return buf3, buf0, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf3
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
    When used with `PackedSequence`s, an additional attribute `max_batch_len` is defined
    to determine the size of the per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPRNNCellNew(DPRNNCellBase):
"""An Elman RNN cell with tanh or ReLU non-linearity.
DP-friendly drop-in replacement of the ``torch.nn.RNNCell`` module to use in ``DPRNN``.
Refer to ``torch.nn.RNNCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
nonlinearity: 'str'='tanh') ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=1)
if nonlinearity not in ('tanh', 'relu'):
raise ValueError(f'Unsupported nonlinearity: {nonlinearity}')
self.nonlinearity = nonlinearity
def forward(self, input_0):
primals_2 = self.ih.weight
primals_3 = self.ih.bias
primals_4 = self.hh.weight
primals_5 = self.hh.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
anibadde/opacus
|
DPRNNCell
| false
| 14,860
|
[
"Apache-2.0"
] | 958
|
be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
https://github.com/anibadde/opacus/tree/be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
JointsMSELoss
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
import torch.multiprocessing
class JointsMSELoss(nn.Module):
def __init__(self, use_target_weight):
super(JointsMSELoss, self).__init__()
        self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_joints = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1
)
heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
loss = 0
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_gt = heatmaps_gt[idx].squeeze()
if self.use_target_weight:
loss += 0.5 * self.criterion(heatmap_pred.mul(target_weight
[:, idx]), heatmap_gt.mul(target_weight[:, idx]))
else:
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
return loss / num_joints
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'use_target_weight': 4}]
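# A minimal reference sketch of the weighted reduction, assuming (B, J) = (4, 4)
# inputs where each per-joint "heatmap" degenerates to a single value: the loss
# is the per-joint 0.5 * MSE of weight-scaled predictions, averaged over joints.
def check_joints_mse_reference():
    output, target, w = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
    ref = sum(0.5 * torch.mean((output[:, j] * w[:, j] - target[:, j] * w[:, j]) ** 2)
              for j in range(4)) / 4
    loss = JointsMSELoss(use_target_weight=True)(output, target, w)
    assert torch.allclose(ref, loss)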
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp12 = tmp10 * tmp11
tmp14 = tmp13 * tmp11
tmp15 = tmp12 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp22 = tmp20 * tmp21
tmp24 = tmp23 * tmp21
tmp25 = tmp22 - tmp24
tmp26 = tmp25 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp32 = tmp30 * tmp31
tmp34 = tmp33 * tmp31
tmp35 = tmp32 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.sum(tmp37, 1)[:, None]
tmp40 = 4.0
tmp41 = tmp9 / tmp40
tmp42 = 0.5
tmp43 = tmp41 * tmp42
tmp44 = 0.0
tmp45 = tmp43 + tmp44
tmp46 = tmp19 / tmp40
tmp47 = tmp46 * tmp42
tmp48 = tmp45 + tmp47
tmp49 = tmp29 / tmp40
tmp50 = tmp49 * tmp42
tmp51 = tmp48 + tmp50
tmp52 = tmp39 / tmp40
tmp53 = tmp52 * tmp42
tmp54 = tmp51 + tmp53
tmp55 = 0.25
tmp56 = tmp54 * tmp55
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp56, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf4, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf4,
class JointsMSELossNew(nn.Module):
def __init__(self, use_target_weight):
super(JointsMSELossNew, self).__init__()
        self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
ankhzaya/HigherHRNet-Human-Pose-Estimation
|
JointsMSELoss
| false
| 14,861
|
[
"MIT"
] | 775
|
b4610aecaa5cf3de3cd69bfb13c7c79c8d514c7c
|
https://github.com/ankhzaya/HigherHRNet-Human-Pose-Estimation/tree/b4610aecaa5cf3de3cd69bfb13c7c79c8d514c7c
|
Cosine
|
from _paritybench_helpers import _mock_config
import torch
from torch.optim.lr_scheduler import *
class Cosine(torch.nn.Module):
def __init__(self, config):
super().__init__()
def forward(self, src, tgt):
src = src.float()
tgt = tgt.float()
return (torch.matmul(src, tgt.transpose(2, 1)) / (src.norm(p=2, dim
=-1, keepdim=True) * tgt.norm(p=2, dim=-1, keepdim=True) + 1e-09)
).squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config()}]
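# A hedged note with a 3-D sketch (shapes here are illustrative assumptions):
# the score matrix is normalised by the row norms only, so in general just its
# diagonal is a true cosine similarity, matching F.cosine_similarity on the
# last dimension.
def check_cosine_diagonal():
    import torch.nn.functional as F
    src, tgt = torch.rand(4, 5, 8), torch.rand(4, 5, 8)
    scores = torch.matmul(src, tgt.transpose(2, 1)) / (src.norm(p=2, dim=-1,
        keepdim=True) * tgt.norm(p=2, dim=-1, keepdim=True) + 1e-09)
    diag = scores.diagonal(dim1=-2, dim2=-1)
    assert torch.allclose(diag, F.cosine_similarity(src, tgt, dim=-1), atol=1e-05)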
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_linalg_vector_norm_mul_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tmp11 * tmp23
tmp25 = 1e-09
tmp26 = tmp24 + tmp25
tl.store(out_ptr0 + x0, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2(in_out_ptr0,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out
=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_linalg_vector_norm_mul_1[grid(64)](arg0_1,
arg1_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2[grid(256)](
buf3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
return buf3,
class CosineNew(torch.nn.Module):
def __init__(self, config):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
Cosine
| false
| 14,862
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
MseCriterion
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class MseCriterion(Criterion):
def __init__(self, alpha=1.0, name='MSE Regression Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
        if weight is not None:
loss = torch.mean(F.mse_loss(input.squeeze(), target, reduce=
False) * weight.reshape((target.shape[0], 1)))
else:
loss = F.mse_loss(input.squeeze(), target)
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
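# A quick sanity sketch: with weight=None the criterion is just
# alpha * F.mse_loss(input.squeeze(), target), which is what the single fused
# reduction below computes.
def check_mse_criterion():
    inp, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    crit = MseCriterion(alpha=2.0)
    assert torch.allclose(crit(inp, tgt), 2.0 * F.mse_loss(inp.squeeze(), tgt))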
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class MseCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='MSE Regression Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
MseCriterion
| false
| 14,863
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
HLCriterion
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class HLCriterion(Criterion):
def __init__(self, alpha=1.0, name='Hellinger Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1,
reduction='batchmean'):
"""input/target: logits
"""
input = input.float()
target = target.float()
si = F.softmax(target.detach(), dim=-1, dtype=torch.float32).sqrt_()
st = F.softmax(input.detach(), dim=-1, dtype=torch.float32).sqrt_()
loss = F.mse_loss(si, st)
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
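# A hedged reference sketch: the loss is the mean squared difference of the
# square-rooted softmax distributions, i.e. proportional to a squared
# Hellinger distance between the two predicted distributions.
def check_hellinger_reference():
    a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    si = F.softmax(b, dim=-1).sqrt()
    st = F.softmax(a, dim=-1).sqrt()
    assert torch.allclose(F.mse_loss(si, st), HLCriterion()(a, b))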
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_red_fused__softmax_mse_loss_mul_sqrt_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp23 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp11 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = tmp10 / tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tmp9 - tmp19
tmp21 = tmp20 * tmp20
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = _tmp23 + tmp22
_tmp23 = tl.where(rmask, tmp24, _tmp23)
tmp23 = tl.sum(_tmp23, 1)[:, None]
tmp25 = 256.0
tmp26 = tmp23 / tmp25
tmp27 = 1.0
tmp28 = tmp26 * tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp28, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_red_fused__softmax_mse_loss_mul_sqrt_1[grid(1)](buf4, buf0,
buf1, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1)
del buf0
del buf1
return buf4,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class HLCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='Hellinger Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
HLCriterion
| false
| 14,864
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
NsKlCriterion
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
def stable_kl(logit, target, epsilon=1e-06, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class NsKlCriterion(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits
"""
input = input.float()
target = target.float()
loss = stable_kl(input, target.detach())
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
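# A hedged reading of stable_kl, plus a minimal finiteness check: with small
# epsilon, rp = -log(1/(p + eps) - 1 + eps) is approximately the log-odds
# log(p / (1 - p)), so the sum p * (rp - ry) * 2 penalises log-odds gaps
# between the two softmax distributions rather than computing F.kl_div.
def check_stable_kl_finite():
    logit, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    loss = stable_kl(logit, target)
    assert loss.ndim == 0 and bool(torch.isfinite(loss))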
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp26 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1, 1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tmp21 = tmp20 - tmp19
tmp22 = tmp21 + tmp15
tmp23 = tl_math.log(tmp22)
tmp24 = -tmp23
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 + tmp15
tmp41 = tmp17 / tmp40
tmp42 = tmp41 * tmp19
tmp43 = tmp42 - tmp19
tmp44 = tmp43 + tmp15
tmp45 = tl_math.log(tmp44)
tmp46 = -tmp45
tmp47 = tmp24 - tmp46
tmp48 = tmp14 * tmp47
tmp49 = 2.0
tmp50 = tmp48 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = _tmp52 + tmp51
_tmp52 = tl.where(rmask, tmp53, _tmp52)
tmp52 = tl.sum(_tmp52, 1)[:, None]
tmp54 = 0.015625
tmp55 = tmp52 * tmp54
tmp56 = 1.0
tmp57 = tmp55 * tmp56
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp57, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1[
grid(1)](buf5, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256,
num_warps=8, num_stages=1)
del buf0
del buf2
return buf5,
def stable_kl(logit, target, epsilon=1e-06, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class NsKlCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
NsKlCriterion
| false
| 14,865
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
DPGRUCell
|
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
    When used with `PackedSequence`s, an additional attribute `max_batch_len` is defined
    to determine the size of the per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPGRUCell(DPRNNCellBase):
"""A gated recurrent unit (GRU) cell
DP-friendly drop-in replacement of the ``torch.nn.GRUCell`` module to use in ``DPGRU``.
Refer to ``torch.nn.GRUCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=3)
def forward(self, input: 'Tensor', hx: 'Optional[Tensor]'=None,
batch_size_t: 'Optional[int]'=None) ->Tensor:
if hx is None:
hx = torch.zeros(input.shape[0], self.hidden_size, dtype=input.
dtype, device=input.device)
h_prev = hx if batch_size_t is None else hx[:batch_size_t, :]
gates_x = self.ih(input)
gates_h = self.hh(h_prev)
r_t_input_x, z_t_input_x, n_t_input_x = torch.split(gates_x, self.
hidden_size, 1)
r_t_input_h, z_t_input_h, n_t_input_h = torch.split(gates_h, self.
hidden_size, 1)
r_t = torch.sigmoid(r_t_input_x + r_t_input_h)
z_t = torch.sigmoid(z_t_input_x + z_t_input_h)
n_t = torch.tanh(n_t_input_x + r_t * n_t_input_h)
h_t = (1 - z_t) * n_t + z_t * h_prev
return h_t
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
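# Illustrative usage sketch (added for clarity; not part of the original
# repo): one GRU step on CPU using the helper shapes above, then a second
# step that feeds the hidden state back in. No Triton/CUDA required.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    cell = DPGRUCell(*init_args, **init_kwargs)
    x, = get_inputs()
    h1 = cell(x)       # hx defaults to zeros
    h2 = cell(x, h1)   # recurrent step with the previous hidden state
    print(h2.shape)    # torch.Size([4, 4])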
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp19 = 1.0
tmp20 = tmp19 - tmp5
tmp21 = tmp20 * tmp18
tmp22 = 0.0
tmp23 = tmp5 * tmp22
tmp24 = tmp21 + tmp23
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
tl.store(out_ptr3 + x2, tmp24, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(4, 12), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(16)](buf1,
primals_3, buf2, buf4, buf3, buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf1
del primals_3
return buf6, primals_1, buf0, reinterpret_tensor(buf2, (4, 4), (12, 1), 8
), buf3, buf4, buf5
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
    When used with `PackedSequence`s, an additional attribute `max_batch_len` is defined to determine
    the size of the per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPGRUCellNew(DPRNNCellBase):
"""A gated recurrent unit (GRU) cell
DP-friendly drop-in replacement of the ``torch.nn.GRUCell`` module to use in ``DPGRU``.
Refer to ``torch.nn.GRUCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=3)
def forward(self, input_0):
primals_2 = self.ih.weight
primals_3 = self.ih.bias
primals_4 = self.hh.weight
primals_5 = self.hh.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
anibadde/opacus
|
DPGRUCell
| false
| 14,866
|
[
"Apache-2.0"
] | 958
|
be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
https://github.com/anibadde/opacus/tree/be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
EDMLoss
|
import torch
import torch.nn as nn
import torch.optim
class EDMLoss(nn.Module):
def __init__(self):
super(EDMLoss, self).__init__()
def forward(self, p_target: 'torch.Tensor', p_estimate: 'torch.Tensor'):
assert p_target.shape == p_estimate.shape
cdf_target = torch.cumsum(p_target, dim=1)
cdf_estimate = torch.cumsum(p_estimate, dim=1)
cdf_diff = cdf_estimate - cdf_target
samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff
), 2)))
return samplewise_emd.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
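# Illustrative usage sketch (added for clarity; not part of the original
# repo): instantiate the eager module from the helpers above and run a CPU
# forward pass; the compiled Triton path is not needed for this.
if __name__ == '__main__':
    loss_fn = EDMLoss()
    p_target, p_estimate = get_inputs()
    print(loss_fn(p_target, p_estimate))  # scalar earth-mover-style distance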
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
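# Added note (not in the original source): `_triton_helper_fn_add0` below is
# the binary op handed to tl.associative_scan, which turns elementwise
# addition into an inclusive prefix sum -- this is how the kernel implements
# torch.cumsum along dim=1 of the (4, 4, 4, 4) input (r2 strides by 16, the
# dim-1 stride).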
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl
.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp3, xmask)
@triton.jit
def triton_per_fused_abs_mean_pow_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = 1.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_0[grid(64)](arg1_1, buf0, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_cumsum_0[grid(64)](arg0_1, buf1, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_abs_mean_pow_sqrt_sub_1[grid(1)](buf3, buf0, buf1,
1, 256, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class EDMLossNew(nn.Module):
def __init__(self):
super(EDMLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ankerok1/nima.pytorch
|
EDMLoss
| false
| 14,867
|
[
"MIT"
] | 300
|
bbdbeeb8c22d880205a4fa35cfc2a533d064ee5d
|
https://github.com/ankerok1/nima.pytorch/tree/bbdbeeb8c22d880205a4fa35cfc2a533d064ee5d
|
KlCriterion
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class KlCriterion(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits
"""
input = input.float()
target = target.float()
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
F.softmax(target, dim=-1, dtype=torch.float32), reduction=
'batchmean')
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
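# Illustrative usage sketch (added for clarity; not part of the original
# repo): batchmean KL divergence between two random logit tensors, computed
# on CPU with the plain PyTorch module above.
if __name__ == '__main__':
    crit = KlCriterion(alpha=1.0)
    logits, target_logits = get_inputs()
    print(crit(logits, target_logits))  # scalar tensor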
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = _tmp34 + tmp33
_tmp34 = tl.where(rmask, tmp35, _tmp34)
tmp34 = tl.sum(_tmp34, 1)[:, None]
tmp36 = 0.25
tmp37 = tmp34 * tmp36
tmp38 = 1.0
tmp39 = tmp37 * tmp38
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
](buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8,
num_stages=1)
del buf0
del buf2
return buf4,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class KlCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
KlCriterion
| false
| 14,868
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
JSCriterion
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class JSCriterion(Criterion):
def __init__(self, alpha=1.0, name='JS Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1,
reduction='batchmean'):
"""input/target: logits
"""
input = input.float()
target = target.float()
m = F.softmax(target.detach(), dim=-1, dtype=torch.float32
) + F.softmax(input.detach(), dim=-1, dtype=torch.float32)
m = 0.5 * m
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
m, reduction=reduction) + F.kl_div(F.log_softmax(target, dim=-1,
dtype=torch.float32), m, reduction=reduction)
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
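# Illustrative usage sketch (added for clarity; not part of the original
# repo): the JS divergence is symmetric in its arguments, so swapping the
# inputs yields the same value (up to floating point). Runs on CPU.
if __name__ == '__main__':
    crit = JSCriterion(alpha=1.0)
    a, b = get_inputs()
    print(crit(a, b), crit(b, a))  # two (near-)identical scalars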
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp46 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp10 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp30 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp48 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp49 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp9 / tmp16
tmp18 = tmp8 + tmp17
tmp19 = 0.5
tmp20 = tmp18 * tmp19
tmp21 = libdevice.isnan(tmp20).to(tl.int1)
tmp22 = 0.0
tmp23 = tmp20 == tmp22
tmp24 = tl_math.log(tmp20)
tmp25 = tmp20 * tmp24
tmp26 = tl.where(tmp23, tmp22, tmp25)
tmp27 = float('nan')
tmp28 = tl.where(tmp21, tmp27, tmp26)
tmp31 = tl_math.exp(tmp30)
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tl_math.log(tmp40)
tmp42 = tmp29 - tmp41
tmp43 = tmp20 * tmp42
tmp44 = tmp28 - tmp43
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = _tmp46 + tmp45
_tmp46 = tl.where(rmask, tmp47, _tmp46)
tmp50 = tl_math.exp(tmp49)
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp50 + tmp52
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp60 = tl_math.log(tmp59)
tmp61 = tmp48 - tmp60
tmp62 = tmp20 * tmp61
tmp63 = tmp28 - tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = _tmp65 + tmp64
_tmp65 = tl.where(rmask, tmp66, _tmp65)
tmp46 = tl.sum(_tmp46, 1)[:, None]
tmp65 = tl.sum(_tmp65, 1)[:, None]
tmp67 = 0.25
tmp68 = tmp46 * tmp67
tmp69 = tmp65 * tmp67
tmp70 = tmp68 + tmp69
tmp71 = 1.0
tmp72 = tmp70 * tmp71
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp72, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0,
buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf1,
buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf7 = buf4
del buf4
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1[grid
(1)](buf7, buf0, buf1, buf3, buf5, 1, 256, XBLOCK=1, RBLOCK=256,
num_warps=8, num_stages=1)
del buf0
del buf1
del buf3
del buf5
return buf7,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class JSCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='JS Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
JSCriterion
| false
| 14,869
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
SymKlCriterion
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class SymKlCriterion(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1,
reduction='batchmean'):
"""input/target: logits
"""
input = input.float()
target = target.float()
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
F.softmax(target.detach(), dim=-1, dtype=torch.float32),
reduction=reduction) + F.kl_div(F.log_softmax(target, dim=-1,
dtype=torch.float32), F.softmax(input.detach(), dim=-1, dtype=
torch.float32), reduction=reduction)
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
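# Illustrative usage sketch (added for clarity; not part of the original
# repo): KL(a||b) + KL(b||a) over the softmax distributions of two random
# logit tensors, computed eagerly on CPU.
if __name__ == '__main__':
    crit = SymKlCriterion(alpha=1.0)
    a, b = get_inputs()
    print(crit(a, b))  # scalar tensor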
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
@triton.jit
def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp37 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp40 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp42 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp52 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp60 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = _tmp34 + tmp33
_tmp34 = tl.where(rmask, tmp35, _tmp34)
tmp39 = tmp37 + tmp38
tmp41 = tmp39 + tmp40
tmp43 = tmp41 + tmp42
tmp44 = tmp36 / tmp43
tmp45 = libdevice.isnan(tmp44).to(tl.int1)
tmp46 = tmp44 == tmp10
tmp47 = tl_math.log(tmp44)
tmp48 = tmp44 * tmp47
tmp49 = tl.where(tmp46, tmp10, tmp48)
tmp50 = tl.where(tmp45, tmp15, tmp49)
tmp53 = tl_math.exp(tmp52)
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp63 = tl_math.log(tmp62)
tmp64 = tmp51 - tmp63
tmp65 = tmp44 * tmp64
tmp66 = tmp50 - tmp65
tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
tmp69 = _tmp68 + tmp67
_tmp68 = tl.where(rmask, tmp69, _tmp68)
tmp34 = tl.sum(_tmp34, 1)[:, None]
tmp68 = tl.sum(_tmp68, 1)[:, None]
tmp70 = 0.25
tmp71 = tmp34 * tmp70
tmp72 = tmp68 * tmp70
tmp73 = tmp71 + tmp72
tmp74 = 1.0
tmp75 = tmp73 * tmp74
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp75, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0,
buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_1[grid(256)](arg0_1, buf2,
buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf8 = buf3
del buf3
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2[grid
(1)](buf8, buf0, buf2, buf4, buf6, 1, 256, XBLOCK=1, RBLOCK=256,
num_warps=8, num_stages=1)
del buf0
del buf2
del buf4
del buf6
return buf8,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class SymKlCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
SymKlCriterion
| false
| 14,870
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
MultiheadAttentionWrapper
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.optim.lr_scheduler import *
def linear(x):
return x
def activation(func_a):
"""Activation function wrapper
"""
try:
f = eval(func_a)
except:
f = linear
return f
class DropoutWrapper(nn.Module):
"""
    This is a dropout wrapper which supports fixed-mask dropout
"""
def __init__(self, dropout_p=0, enable_vbp=True):
super(DropoutWrapper, self).__init__()
"""variational dropout means fix dropout mask
ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
"""
self.enable_variational_dropout = enable_vbp
self.dropout_p = dropout_p
def forward(self, x):
"""
:param x: batch * len * input_size
"""
if self.training is False or self.dropout_p == 0:
return x
if len(x.size()) == 3:
mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self.
dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
mask.requires_grad = False
return mask.unsqueeze(1).expand_as(x) * x
else:
return F.dropout(x, p=self.dropout_p, training=self.training)
class MultiheadAttentionWrapper(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, query_dim, key_dim, value_dim, prefix='attention',
opt={}, dropout=None):
super().__init__()
self.prefix = prefix
self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.
prefix), 0)) if dropout is None else dropout
self.qkv_dim = [query_dim, key_dim, value_dim]
        assert query_dim == key_dim, 'query dim must equal key dim'
self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
self.share = opt.get('{}_share'.format(self.prefix), False)
self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
if self.proj_on:
self.proj_modules = nn.ModuleList([nn.Linear(dim, self.
hidden_size) for dim in self.qkv_dim[0:2]])
if self.layer_norm_on:
for proj in self.proj_modules:
proj = weight_norm(proj)
if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
self.proj_modules[1] = self.proj_modules[0]
self.f = activation(opt.get('{}_activation'.format(self.prefix),
'relu'))
self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
self.qkv_head_dim[2] = value_dim // self.num_heads
assert self.qkv_head_dim[0
] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads'
assert self.qkv_head_dim[2
] * self.num_heads == value_dim, 'value size must be divisible by num_heads'
else:
self.qkv_head_dim = [(emb // self.num_heads) for emb in self.
qkv_dim]
assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0
], 'query size must be divisible by num_heads'
assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1
], 'key size must be divisible by num_heads'
assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2
], 'value size must be divisible by num_heads'
if self.scale_on:
self.scaling = self.qkv_head_dim[0] ** -0.5
self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix),
False)
self.output_size = self.qkv_dim[2]
def forward(self, query, key, value, key_padding_mask=None):
query = query.transpose(0, 1)
key = key.transpose(0, 1)
value = value.transpose(0, 1)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.qkv_dim[0]
q, k, v = query, key, value
if self.proj_on:
if self.dropout:
q, k = self.dropout(q), self.dropout(k)
q, k = [self.f(proj(input)) for input, proj in zip([query, key],
self.proj_modules)]
src_len = k.size(0)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.scale_on:
q *= self.scaling
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.
qkv_head_dim[0]).transpose(0, 1)
k = k.contiguous().view(src_len, bsz * self.num_heads, self.
qkv_head_dim[1]).transpose(0, 1)
v = v.contiguous().view(src_len, bsz * self.num_heads, self.
qkv_head_dim[2]).transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.float().masked_fill(key_padding_mask
.unsqueeze(1).unsqueeze(2), float('-inf')).type_as(attn_weights
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
if self.drop_diagonal:
assert attn_weights.size(1) == attn_weights.size(2)
diag_mask = torch.diag(attn_weights.data.new(attn_weights.size(
1)).zero_() + 1).byte().unsqueeze(0).expand_as(attn_weights)
attn_weights.data.masked_fill_(diag_mask, -float('inf'))
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = self.dropout(attn_weights)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
qkv_head_dim[2]]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, -1)
attn = attn.transpose(0, 1)
return attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'query_dim': 4, 'key_dim': 4, 'value_dim': 4}]
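# Illustrative usage sketch (added for clarity; not part of the original
# repo): single-head attention with the default options (no projection, no
# scaling), run eagerly on CPU with the helper shapes above.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    attn = MultiheadAttentionWrapper(*init_args, **init_kwargs)
    q, k, v = get_inputs()
    out = attn(q, k, v)
    print(out.shape)  # torch.Size([4, 4, 4]) -- (batch, tgt_len, value_dim)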
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
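# Added note (not in the original source): `triton_poi_fused_0` below is a
# pure layout kernel -- it copies each element to offset x0 + 4 * x2 + 16 *
# x1, i.e. it materializes the transpose(0, 1) from the eager forward so the
# result can be fed to the fused scaled-dot-product-attention op.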
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp0, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
triton_poi_fused_0[grid(64)](arg1_1, buf1, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
triton_poi_fused_0[grid(64)](arg2_1, buf2, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg2_1
buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(
buf0, buf1, buf2, None, False, scale=1.0)
del buf0
del buf1
del buf2
buf4 = buf3[0]
del buf3
return reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0),
def linear(x):
return x
def activation(func_a):
"""Activation function wrapper
"""
try:
f = eval(func_a)
except:
f = linear
return f
class DropoutWrapper(nn.Module):
"""
    This is a dropout wrapper which supports fixed-mask dropout
"""
def __init__(self, dropout_p=0, enable_vbp=True):
super(DropoutWrapper, self).__init__()
"""variational dropout means fix dropout mask
ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
"""
self.enable_variational_dropout = enable_vbp
self.dropout_p = dropout_p
def forward(self, x):
"""
:param x: batch * len * input_size
"""
if self.training is False or self.dropout_p == 0:
return x
if len(x.size()) == 3:
mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self.
dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
mask.requires_grad = False
return mask.unsqueeze(1).expand_as(x) * x
else:
return F.dropout(x, p=self.dropout_p, training=self.training)
class MultiheadAttentionWrapperNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, query_dim, key_dim, value_dim, prefix='attention',
opt={}, dropout=None):
super().__init__()
self.prefix = prefix
self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.
prefix), 0)) if dropout is None else dropout
self.qkv_dim = [query_dim, key_dim, value_dim]
        assert query_dim == key_dim, 'query dim must equal key dim'
self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
self.share = opt.get('{}_share'.format(self.prefix), False)
self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
if self.proj_on:
self.proj_modules = nn.ModuleList([nn.Linear(dim, self.
hidden_size) for dim in self.qkv_dim[0:2]])
if self.layer_norm_on:
for proj in self.proj_modules:
proj = weight_norm(proj)
if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
self.proj_modules[1] = self.proj_modules[0]
self.f = activation(opt.get('{}_activation'.format(self.prefix),
'relu'))
self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
self.qkv_head_dim[2] = value_dim // self.num_heads
assert self.qkv_head_dim[0
] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads'
assert self.qkv_head_dim[2
] * self.num_heads == value_dim, 'value size must be divisible by num_heads'
else:
self.qkv_head_dim = [(emb // self.num_heads) for emb in self.
qkv_dim]
assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0
], 'query size must be divisible by num_heads'
assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1
], 'key size must be divisible by num_heads'
assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2
], 'value size must be divisible by num_heads'
if self.scale_on:
self.scaling = self.qkv_head_dim[0] ** -0.5
self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix),
False)
self.output_size = self.qkv_dim[2]
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
anlewy/mt-dnn
|
MultiheadAttentionWrapper
| false
| 14,871
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
DPLSTMCell
|
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Tuple
from typing import Optional
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
    When used with `PackedSequence`s, an additional attribute `max_batch_len` is defined to determine
    the size of the per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPLSTMCell(DPRNNCellBase):
"""A long short-term memory (LSTM) cell.
DP-friendly drop-in replacement of the ``torch.nn.LSTMCell`` module to use in ``DPLSTM``.
Refer to ``torch.nn.LSTMCell`` documentation for the model description, parameters and inputs/outputs.
"""
has_cell_state = True
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=4)
def forward(self, input: 'Tensor', hx:
'Optional[Tuple[Tensor, Tensor]]'=None, batch_size_t:
'Optional[int]'=None) ->Tuple[Tensor, Tensor]:
if hx is None:
zeros = torch.zeros(input.shape[0], self.hidden_size, dtype=
input.dtype, device=input.device)
hx = zeros, zeros
h_prev, c_prev = hx
if batch_size_t is None:
gates = self.ih(input) + self.hh(h_prev)
else:
gates = self.ih(input) + self.hh(h_prev[:batch_size_t, :])
i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates,
self.hidden_size, 1)
i_t = torch.sigmoid(i_t_input)
f_t = torch.sigmoid(f_t_input)
g_t = torch.tanh(g_t_input)
o_t = torch.sigmoid(o_t_input)
if batch_size_t is None:
c_t = f_t * c_prev + i_t * g_t
else:
c_t = f_t * c_prev[:batch_size_t, :] + i_t * g_t
h_t = o_t * torch.tanh(c_t)
return h_t, c_t
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
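# Illustrative usage sketch (added for clarity; not part of the original
# repo): one LSTM step on CPU; hx defaults to (zeros, zeros) and the returned
# (h_t, c_t) pair can be fed back in for the next step.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    cell = DPLSTMCell(*init_args, **init_kwargs)
    x, = get_inputs()
    h_t, c_t = cell(x)
    h_t2, c_t2 = cell(x, (h_t, c_t))
    print(h_t2.shape, c_t2.shape)  # torch.Size([4, 4]) twice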
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = 0.0
tmp33 = tmp31 * tmp32
tmp34 = tmp7 * tmp23
tmp35 = tmp33 + tmp34
tmp36 = 1.0
tmp37 = tmp36 - tmp31
tmp38 = tmp31 * tmp37
tmp39 = libdevice.tanh(tmp35)
tmp40 = tmp15 * tmp39
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp23, xmask)
tl.store(out_ptr3 + x2, tmp35, xmask)
tl.store(out_ptr4 + x2, tmp38, xmask)
tl.store(out_ptr5 + x2, tmp40, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 16), (1,
4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](buf1
, primals_3, buf2, primals_5, buf3, buf5, buf4, buf6, buf8,
buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_3
del primals_5
return buf7, buf6, primals_1, buf0, buf3, buf4, buf5, buf6, buf8
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
    When used with `PackedSequence`s, an additional attribute `max_batch_len` is defined to determine
    the size of the per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPLSTMCellNew(DPRNNCellBase):
"""A long short-term memory (LSTM) cell.
DP-friendly drop-in replacement of the ``torch.nn.LSTMCell`` module to use in ``DPLSTM``.
Refer to ``torch.nn.LSTMCell`` documentation for the model description, parameters and inputs/outputs.
"""
has_cell_state = True
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=4)
def forward(self, input_0):
primals_2 = self.ih.weight
primals_3 = self.ih.bias
primals_4 = self.hh.weight
primals_5 = self.hh.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
anibadde/opacus
|
DPLSTMCell
| false
| 14,872
|
[
"Apache-2.0"
] | 958
|
be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
https://github.com/anibadde/opacus/tree/be221231e1b579bdae4ad34c8ae0c7c4928cee25
|
Clump
|
import torch
from torch import nn
class Clump(nn.Module):
"""Clipping input tensor."""
def __init__(self, min_v: 'int'=-50, max_v: 'int'=50):
"""Class for preparing input for DL model with mixed data.
Args:
min_v: Min value.
max_v: Max value.
"""
super(Clump, self).__init__()
self.min_v = min_v
self.max_v = max_v
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = torch.clamp(x, self.min_v, self.max_v)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
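# Illustrative usage sketch (added for clarity; not part of the original
# repo): clip values outside [-1, 1] to the interval bounds, on CPU.
if __name__ == '__main__':
    clump = Clump(min_v=-1, max_v=1)
    x = torch.randn(4, 4, 4, 4) * 5.0
    y = clump(x)
    print(y.min().item() >= -1.0, y.max().item() <= 1.0)  # True True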
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -50.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 50.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ClumpNew(nn.Module):
"""Clipping input tensor."""
def __init__(self, min_v: 'int'=-50, max_v: 'int'=50):
"""Class for preparing input for DL model with mixed data.
Args:
min_v: Min value.
max_v: Max value.
"""
super(ClumpNew, self).__init__()
self.min_v = min_v
self.max_v = max_v
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
antigab/LightAutoML
|
Clump
| false
| 14,873
|
[
"Apache-2.0"
] | 766
|
51a4e2bd0ebffbe0817fb50434280f8e7c40fa4c
|
https://github.com/antigab/LightAutoML/tree/51a4e2bd0ebffbe0817fb50434280f8e7c40fa4c
|
NsSymKlCriterion
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
def stable_kl(logit, target, epsilon=1e-06, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class NsSymKlCriterion(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits
"""
input = input.float()
target = target.float()
        loss = stable_kl(input, target.detach()) + stable_kl(target, input.detach())
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
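# Sanity sketch (illustrative): for the (4, 4, 4, 4) inputs above, stable_kl
# flattens each tensor to (64, 4), so each directional KL term is divided by
# bs == 64 -- the same 1/64 that the fused Triton kernel below bakes in as
# the constant 0.015625.
crit = NsSymKlCriterion(alpha=1.0)
loss = crit(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert loss.dim() == 0  # both KL directions reduced to a single scalar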
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp102 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp26 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp55 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp60 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp63 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp76 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp77 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp79 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp82 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp85 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1, 1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tmp21 = tmp20 - tmp19
tmp22 = tmp21 + tmp15
tmp23 = tl_math.log(tmp22)
tmp24 = -tmp23
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 + tmp15
tmp41 = tmp17 / tmp40
tmp42 = tmp41 * tmp19
tmp43 = tmp42 - tmp19
tmp44 = tmp43 + tmp15
tmp45 = tl_math.log(tmp44)
tmp46 = -tmp45
tmp47 = tmp24 - tmp46
tmp48 = tmp14 * tmp47
tmp49 = 2.0
tmp50 = tmp48 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = _tmp52 + tmp51
_tmp52 = tl.where(rmask, tmp53, _tmp52)
tmp56 = tl_math.exp(tmp55)
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp62 + tmp64
tmp66 = tl_math.log(tmp65)
tmp67 = tmp54 - tmp66
tmp68 = tl_math.exp(tmp67)
tmp69 = tmp68 + tmp15
tmp70 = tmp17 / tmp69
tmp71 = tmp70 * tmp19
tmp72 = tmp71 - tmp19
tmp73 = tmp72 + tmp15
tmp74 = tl_math.log(tmp73)
tmp75 = -tmp74
tmp78 = tl_math.exp(tmp77)
tmp80 = tl_math.exp(tmp79)
tmp81 = tmp78 + tmp80
tmp83 = tl_math.exp(tmp82)
tmp84 = tmp81 + tmp83
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp84 + tmp86
tmp88 = tl_math.log(tmp87)
tmp89 = tmp76 - tmp88
tmp90 = tl_math.exp(tmp89)
tmp91 = tmp90 + tmp15
tmp92 = tmp17 / tmp91
tmp93 = tmp92 * tmp19
tmp94 = tmp93 - tmp19
tmp95 = tmp94 + tmp15
tmp96 = tl_math.log(tmp95)
tmp97 = -tmp96
tmp98 = tmp75 - tmp97
tmp99 = tmp68 * tmp98
tmp100 = tmp99 * tmp49
tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK])
tmp103 = _tmp102 + tmp101
_tmp102 = tl.where(rmask, tmp103, _tmp102)
tmp52 = tl.sum(_tmp52, 1)[:, None]
tmp102 = tl.sum(_tmp102, 1)[:, None]
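    # 0.015625 == 1/64 == 1/bs for the (64, 4) flattened logits; the
    # Python-side division by bs is constant-folded into the kernel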
tmp104 = 0.015625
tmp105 = tmp52 * tmp104
tmp106 = tmp102 * tmp104
tmp107 = tmp105 + tmp106
tmp108 = 1.0
tmp109 = tmp107 * tmp108
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp109, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, buf7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf10 = buf4
del buf4
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1[
grid(1)](buf10, buf0, buf2, buf5, buf7, 1, 256, XBLOCK=1,
RBLOCK=256, num_warps=8, num_stages=1)
del buf0
del buf2
del buf5
del buf7
return buf10,
def stable_kl(logit, target, epsilon=1e-06, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class NsSymKlCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anlewy/mt-dnn
|
NsSymKlCriterion
| false
| 14,874
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
BiLinearSim
|
from _paritybench_helpers import _mock_config
import torch
from torch.optim.lr_scheduler import *
class BiLinearSim(torch.nn.Module):
def __init__(self, config):
super().__init__()
        self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size, bias=False)
def forward(self, src, tgt):
src_ = self.linear(src)
output = torch.matmul(src_, tgt.transpose(2, 1))
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
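# Usage sketch (illustrative; _Cfg is a hypothetical stand-in for the mocked
# config). Note that transpose(2, 1) swaps dims 1 and 2, so the module reads
# most naturally with 3-D (batch, seq, hidden) inputs, giving pairwise scores:
class _Cfg:
    hidden_size = 4
sim = BiLinearSim(_Cfg())
src = torch.rand(2, 5, 4)  # (batch, src_len, hidden)
tgt = torch.rand(2, 7, 4)  # (batch, tgt_len, hidden)
scores = sim(src, tgt)     # (batch, src_len, tgt_len)
ref = torch.einsum('bid,de,bje->bij', src, sim.linear.weight.t(), tgt)
assert torch.allclose(scores, ref, atol=1e-06)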
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_3, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
del buf0
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0)
class BiLinearSimNew(torch.nn.Module):
def __init__(self, config):
super().__init__()
        self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size, bias=False)
def forward(self, input_0, input_1):
primals_1 = self.linear.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
anlewy/mt-dnn
|
BiLinearSim
| false
| 14,875
|
[
"MIT"
] | 2,075
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
ScaleNorm
|
import torch
from torch import nn
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / norm.clamp(min=self.eps) * self.g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
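# Quick check (illustrative): with dim=4 the scale is 4 ** -0.5 == 0.5, which
# is exactly the constant-folded tmp13 = 0.5 in the fused kernel below.
sn = ScaleNorm(dim=4)
x = torch.rand(4, 4, 4, 4)
norm = x.norm(dim=-1, keepdim=True) * 4 ** -0.5
assert torch.allclose(sn(x), x / norm.clamp(min=1e-05) * sn.g)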
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + 0)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
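    # 0.5 == dim ** -0.5 for dim=4; the module's self.scale is baked into
    # the kernel at compile time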
tmp13 = 0.5
tmp14 = tmp12 * tmp13
tmp15 = 1e-05
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = tmp0 / tmp16
tmp20 = tmp17 * tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)](
primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
return buf0, primals_1
class ScaleNormNew(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, input_0):
primals_2 = self.g
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
antofuller/configaformers
|
ScaleNorm
| false
| 14,876
|
[
"Apache-2.0"
] | 51
|
293253cd35d96c8a24c4004ba3d24fc6dc85a260
|
https://github.com/antofuller/configaformers/tree/293253cd35d96c8a24c4004ba3d24fc6dc85a260
|
RMSNorm
|
import torch
from torch import nn
class RMSNorm(nn.Module):
def __init__(self, dim, eps=1e-08):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
_norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / _norm.clamp(min=self.eps) * self.g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
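# Note (illustrative check): norm(x, dim=-1) * dim ** -0.5 equals
# sqrt(mean(x ** 2)), i.e. the RMS of each vector, so up to the eps clamp
# this is the usual RMSNorm.
rn = RMSNorm(dim=4)
x = torch.rand(2, 3, 4)
rms = x.pow(2).mean(dim=-1, keepdim=True).sqrt()
assert torch.allclose(rn(x), x / rms.clamp(min=1e-08) * rn.g, atol=1e-06)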
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 0.5
tmp14 = tmp12 * tmp13
tmp15 = 1e-08
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = tmp0 / tmp16
tmp19 = tmp17 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)](
primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
return buf0, primals_1
class RMSNormNew(nn.Module):
def __init__(self, dim, eps=1e-08):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, input_0):
primals_2 = self.g
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
antofuller/configaformers
|
RMSNorm
| false
| 14,877
|
[
"Apache-2.0"
] | 51
|
293253cd35d96c8a24c4004ba3d24fc6dc85a260
|
https://github.com/antofuller/configaformers/tree/293253cd35d96c8a24c4004ba3d24fc6dc85a260
|
InputProjectionA
|
import torch
import torch.nn as nn
class InputProjectionA(nn.Module):
"""
This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512x512x3 and the feature map's spatial size is 56x56 (with F channels), then
    this class will generate an output of 56x56x3.
"""
def __init__(self, samplingTimes):
"""
:param samplingTimes: The rate at which you want to down-sample the image
"""
super().__init__()
self.pool = nn.ModuleList()
for i in range(0, samplingTimes):
self.pool.append(nn.AvgPool2d(2, stride=2))
def forward(self, input):
"""
:param input: Input RGB Image
:return: down-sampled image (pyramid-based approach)
"""
for pool in self.pool:
input = pool(input)
return input
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'samplingTimes': 4}]
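# Shape sketch (illustrative): each AvgPool2d(2, stride=2) halves H and W,
# so samplingTimes=4 maps 64x64 down to 4x4 -- matching the four fused
# kernels below (32 -> 16 -> 8 -> 4).
proj = InputProjectionA(samplingTimes=4)
y = proj(torch.rand(4, 4, 64, 64))
assert y.shape == (4, 4, 4, 4)  # 64 / 2 ** 4 == 4 per spatial dim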
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
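    # the four loads above fetch one 2x2 pooling window; averaging is done
    # as sum * 0.25 below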
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16384)](arg0_1, buf0, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
        buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
triton_poi_fused_avg_pool2d_1[grid(4096)](buf0, buf1, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
triton_poi_fused_avg_pool2d_2[grid(1024)](buf1, buf2, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_avg_pool2d_3[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf2
return buf3,
class InputProjectionANew(nn.Module):
"""
This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512x512x3 and the feature map's spatial size is 56x56 (with F channels), then
    this class will generate an output of 56x56x3.
"""
def __init__(self, samplingTimes):
"""
:param samplingTimes: The rate at which you want to down-sample the image
"""
super().__init__()
self.pool = nn.ModuleList()
for i in range(0, samplingTimes):
self.pool.append(nn.AvgPool2d(2, stride=2))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
anilsathyan7/Portrait-Segmentation
|
InputProjectionA
| false
| 14,878
|
[
"MIT"
] | 537
|
dbf69b043cf70d3362bc500ee620f20807e622d2
|
https://github.com/anilsathyan7/Portrait-Segmentation/tree/dbf69b043cf70d3362bc500ee620f20807e622d2
|