entry_point
stringlengths 1
65
| original_triton_python_code
stringlengths 208
619k
| optimised_triton_code
stringlengths 1.15k
275k
| repo_name
stringlengths 7
115
| module_name
stringlengths 1
65
| synthetic
bool 1
class | uuid
int64 0
18.5k
| licenses
listlengths 1
6
| stars
int64 0
19.8k
| sha
stringlengths 40
40
| repo_link
stringlengths 72
180
|
|---|---|---|---|---|---|---|---|---|---|---|
CIoULoss
|
import math
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def ciou(pred, target, eps=1e-07):
    """Compute the Complete-IoU between aligned (x1, y1, x2, y2) box pairs.

    ``pred`` and ``target`` must have matching shapes with the four box
    coordinates indexed along dim 1.  Returns per-pair CIoU values
    (higher is better); ``eps`` stabilises every division.
    """
    # Intersection area of each aligned pair.
    inter_tl = torch.max(pred[:, :2], target[:, :2])
    inter_br = torch.min(pred[:, 2:], target[:, 2:])
    inter_wh = (inter_br - inter_tl).clamp(min=0)
    inter_area = inter_wh[:, 0] * inter_wh[:, 1]
    area_pred = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_gt = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    ious = inter_area / (area_pred + area_gt - inter_area + eps)
    # Squared diagonal of the smallest enclosing box.
    enc_wh = (torch.max(pred[:, 2:], target[:, 2:]) -
              torch.min(pred[:, :2], target[:, :2])).clamp(min=0)
    c2 = enc_wh[:, 0] ** 2 + enc_wh[:, 1] ** 2 + eps
    px1, py1, px2, py2 = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
    gx1, gy1, gx2, gy2 = target[:, 0], target[:, 1], target[:, 2], target[:, 3]
    # Squared distance between the two box centres.
    rho2 = ((gx1 + gx2 - (px1 + px2)) ** 2 / 4 +
            (gy1 + gy2 - (py1 + py2)) ** 2 / 4)
    # Aspect-ratio consistency term v; eps keeps the heights non-zero.
    factor = 4 / math.pi ** 2
    v = factor * torch.pow(
        torch.atan((gx2 - gx1) / (gy2 - gy1 + eps)) -
        torch.atan((px2 - px1) / (py2 - py1 + eps)), 2)
    # v ** 2 / (1 - iou + v) equals alpha * v with alpha = v / (1 - iou + v).
    return ious - (rho2 / c2 + v ** 2 / (1 - ious + v))
def ciou_loss(pred, target, eps=1e-07):
    """Complete-IoU loss: ``1 - ciou(pred, target)``.

    From `Enhancing Geometric Factors into Model Learning and Inference
    for Object Detection and Instance Segmentation
    <https://arxiv.org/abs/2005.03572>`_; code modified from
    https://github.com/Zzh-tju/CIoU.

    Args:
        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).
    Return:
        Tensor: Loss tensor.
    """
    return 1 - ciou(pred, target, eps)
class CIoULoss(nn.Module):
    """Module wrapper around :func:`ciou_loss` with a configurable eps."""

    def __init__(self, eps=1e-06):
        super().__init__()
        self.eps = eps  # numerical-stability constant forwarded to ciou_loss

    def forward(self, pred, target):
        # Stateless apart from eps: delegate to the functional form.
        return ciou_loss(pred, target, self.eps)
def get_inputs():
    # Pred and target tensors; the loss indexes box coords along dim 1.
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]
def get_init_inputs():
    # CIoULoss takes no constructor arguments.
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused elementwise CIoU-loss kernel: one lane per box pair, 64 pairs.
    # The four coordinates of a pair sit at offsets 0/16/32/48 inside a
    # 64-float slab of each input; the scalar loss 1 - ciou is written to
    # in_out_ptr0.  Per call(), in_ptr0 is the second forward argument and
    # in_ptr1 the first — presumably target/pred; verify against the caller.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    # x1/x2 coords at offsets 0/32, y1/y2 coords at offsets 16/48.
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
    tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
    tmp14 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
    # rho2: squared centre distance, (dx/2)^2 + (dy/2)^2.
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = 0.25
    tmp9 = tmp7 * tmp8
    tmp12 = tmp10 + tmp11
    tmp15 = tmp13 + tmp14
    tmp16 = tmp12 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp17 * tmp8
    tmp19 = tmp9 + tmp18
    # c2: squared diagonal of the smallest enclosing box, + eps (1e-6).
    tmp20 = triton_helpers.maximum(tmp4, tmp1)
    tmp21 = triton_helpers.minimum(tmp3, tmp0)
    tmp22 = tmp20 - tmp21
    tmp23 = 0.0
    tmp24 = triton_helpers.maximum(tmp22, tmp23)
    tmp25 = tmp24 * tmp24
    tmp26 = triton_helpers.maximum(tmp14, tmp11)
    tmp27 = triton_helpers.minimum(tmp13, tmp10)
    tmp28 = tmp26 - tmp27
    tmp29 = triton_helpers.maximum(tmp28, tmp23)
    tmp30 = tmp29 * tmp29
    tmp31 = tmp25 + tmp30
    tmp32 = 1e-06
    tmp33 = tmp31 + tmp32
    tmp34 = tmp19 / tmp33  # rho2 / c2
    # v: aspect-ratio term, 4/pi^2 * (atan(w/h) - atan(w'/h'))^2.
    tmp35 = tmp1 - tmp0
    tmp36 = tmp11 - tmp10
    tmp37 = tmp36 + tmp32
    tmp38 = tmp35 / tmp37
    tmp39 = libdevice.atan(tmp38)
    tmp40 = tmp4 - tmp3
    tmp41 = tmp14 - tmp13
    tmp42 = tmp41 + tmp32
    tmp43 = tmp40 / tmp42
    tmp44 = libdevice.atan(tmp43)
    tmp45 = tmp39 - tmp44
    tmp46 = tmp45 * tmp45
    tmp47 = 0.4052847345693511  # 4 / pi**2
    tmp48 = tmp46 * tmp47
    # IoU: clamped intersection over (area1 + area2 - intersection + eps).
    tmp49 = triton_helpers.minimum(tmp4, tmp1)
    tmp50 = triton_helpers.maximum(tmp3, tmp0)
    tmp51 = tmp49 - tmp50
    tmp52 = triton_helpers.maximum(tmp51, tmp23)
    tmp53 = triton_helpers.minimum(tmp14, tmp11)
    tmp54 = triton_helpers.maximum(tmp13, tmp10)
    tmp55 = tmp53 - tmp54
    tmp56 = triton_helpers.maximum(tmp55, tmp23)
    tmp57 = tmp52 * tmp56
    tmp58 = tmp40 * tmp41
    tmp59 = tmp35 * tmp36
    tmp60 = tmp58 + tmp59
    tmp61 = tmp60 - tmp57
    tmp62 = tmp61 + tmp32
    tmp63 = tmp57 / tmp62
    # loss = 1 - (iou - (rho2/c2 + v^2 / (1 - iou + v))).
    tmp64 = tmp48 * tmp48
    tmp65 = 1.0
    tmp66 = tmp65 - tmp63
    tmp67 = tmp66 + tmp48
    tmp68 = tmp64 / tmp67
    tmp69 = tmp34 + tmp68
    tmp70 = tmp63 - tmp69
    tmp71 = tmp65 - tmp70
    tl.store(in_out_ptr0 + x2, tmp71, xmask)
def call(args):
    """Compiled entry point for CIoULossNew.forward.

    Takes two (4, 4, 4, 4) CUDA tensors and returns a 1-tuple holding the
    (4, 4, 4) per-pair CIoU loss produced by the fused kernel.
    """
    arg0_1, arg1_1 = args
    args.clear()
    # The kernel's index arithmetic hard-codes these shapes and strides.
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Single output buffer, used as the kernel's in/out pointer.
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf4 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0[grid(64)](buf4,
            arg1_1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return buf4,
def ciou(pred, target, eps=1e-07):
    """Complete-IoU for aligned (x1, y1, x2, y2) box pairs indexed on dim 1."""
    px1, py1 = pred[:, 0], pred[:, 1]
    px2, py2 = pred[:, 2], pred[:, 3]
    tx1, ty1 = target[:, 0], target[:, 1]
    tx2, ty2 = target[:, 2], target[:, 3]
    # Overlap area of each aligned pair.
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    union = ((px2 - px1) * (py2 - py1) + (tx2 - tx1) * (ty2 - ty1) -
             overlap + eps)
    ious = overlap / union
    # Diagonal^2 of the smallest enclosing box (+ eps).
    span = (torch.max(pred[:, 2:], target[:, 2:]) -
            torch.min(pred[:, :2], target[:, :2])).clamp(min=0)
    c2 = span[:, 0] ** 2 + span[:, 1] ** 2 + eps
    # Squared centre distance between the boxes.
    rho2 = ((tx1 + tx2 - (px1 + px2)) ** 2 / 4 +
            (ty1 + ty2 - (py1 + py2)) ** 2 / 4)
    # Aspect-ratio consistency term; eps keeps heights positive.
    v = 4 / math.pi ** 2 * torch.pow(
        torch.atan((tx2 - tx1) / (ty2 - ty1 + eps)) -
        torch.atan((px2 - px1) / (py2 - py1 + eps)), 2)
    # v**2 / (1 - iou + v) == alpha * v with alpha = v / (1 - iou + v).
    return ious - (rho2 / c2 + v ** 2 / (1 - ious + v))
def ciou_loss(pred, target, eps=1e-07):
    """Per-pair CIoU loss, ``1 - CIoU``.

    Implementation of `Enhancing Geometric Factors into Model Learning and
    Inference for Object Detection and Instance Segmentation
    <https://arxiv.org/abs/2005.03572>`_ (modified from
    https://github.com/Zzh-tju/CIoU).

    Args:
        pred (Tensor): Predicted (x1, y1, x2, y2) boxes, shape (n, 4).
        target (Tensor): Matching ground-truth boxes, shape (n, 4).
        eps (float): Stabiliser forwarded to :func:`ciou`.
    Returns:
        Tensor: Per-pair loss values.
    """
    cious = ciou(pred, target, eps)
    return 1.0 - cious
class CIoULossNew(nn.Module):
    """CIoU loss backed by the fused Triton kernel exposed via ``call``."""

    def __init__(self, eps=1e-06):
        super(CIoULossNew, self).__init__()
        # Kept for interface parity; the compiled kernel appears to use a
        # hard-coded 1e-6 internally.
        self.eps = eps

    def forward(self, input_0, input_1):
        # Pack both tensors for the compiled entry point; unwrap the single
        # output buffer it returns.
        output = call([input_0, input_1])
        return output[0]
|
zhangzhengde0225/SwinTrack
|
CIoULoss
| false
| 16,808
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
LWS
|
import torch
import torch.nn as nn
class LWS(nn.Module):
    """Learnable Weight Scaling head: a frozen linear classifier followed
    by a per-class learnable scale factor."""

    def __init__(self, num_features, num_classes, bias=True):
        super(LWS, self).__init__()
        self.fc = nn.Linear(num_features, num_classes, bias=bias)
        self.scales = nn.Parameter(torch.ones(num_classes))
        # Only self.scales trains; the classifier weights stay frozen.
        for _, frozen_param in self.fc.named_parameters():
            frozen_param.requires_grad = False

    def forward(self, x):
        # Frozen projection, then an in-place per-class rescale.
        logits = self.fc(x)
        logits *= self.scales
        return logits
def get_inputs():
    # One random activation tensor for the benchmark harness.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # No positional args; feature/class sizes passed as keywords.
    kwargs = {'num_features': 4, 'num_classes': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_view_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise broadcast multiply over 256 values:
    # out[i] = in0[i] * in1[i % 4], i.e. a length-4 vector applied along the
    # last dim of the (64, 4) linear output viewed as (4, 4, 4, 4).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    # The 4-element vector is reused by every row, hence evict_last.
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + x4, tmp2, xmask)
def call(args):
    """Compiled entry point for LWSNew.forward.

    args = [fc.weight (4,4), scales (4,), input (4,4,4,4), fc.bias (4,)].
    Computes addmm(args[1], input.view(64, 4), weight.T), then multiplies
    elementwise by args[3]; returns (scaled (4,4,4,4) output, raw addmm
    output).  NOTE(review): relative to the eager LWS module this uses
    scales as the additive term and fc.bias as the multiplier — confirm
    the intended slot pairing against the code generator.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # buf0 = primals_2 + input.view(64, 4) @ primals_1.T
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        # buf2 = buf0 * primals_4 broadcast along the last dim.
        triton_poi_fused_mul_view_0[grid(256)](buf2, buf0, primals_4, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_4
    return buf2, buf0
class LWSNew(nn.Module):
    """Triton-compiled variant of LWS (frozen linear + learnable scales)."""

    def __init__(self, num_features, num_classes, bias=True):
        super(LWSNew, self).__init__()
        self.fc = nn.Linear(num_features, num_classes, bias=bias)
        self.scales = nn.Parameter(torch.ones(num_classes))
        # Freeze the classifier; only self.scales receives gradients.
        for _, p in self.fc.named_parameters():
            p.requires_grad = False

    def forward(self, input_0):
        # Slot order must match call(): weight, scales, input, bias.
        # NOTE(review): call() adds slot 1 and multiplies by slot 3, which
        # pairs scales/bias oppositely to eager LWS — confirm upstream.
        output = call([self.fc.weight, self.scales, input_0, self.fc.bias])
        return output[0]
|
zhangyongshun/BagofTricks-LT
|
LWS
| false
| 16,809
|
[
"MIT"
] | 115
|
aec4d9a552236c32231374b7b00fa5bf4208dae3
|
https://github.com/zhangyongshun/BagofTricks-LT/tree/aec4d9a552236c32231374b7b00fa5bf4208dae3
|
RNN
|
import torch
import torch.nn as nn
from torch.autograd import Variable
class RNN(nn.Module):
    """Minimal Elman-style RNN cell.

    Each step concatenates the input with the hidden state and applies two
    independent linear maps: ``i2h`` produces the next hidden state and
    ``i2o`` produces the output.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)

    def forward(self, input, hidden):
        """Run one timestep; returns ``(output, new_hidden)``."""
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        return output, hidden

    def initHidden(self, batch_size):
        """Return a zeroed (batch_size, hidden_size) initial hidden state.

        Fix: the original branched on ``torch.cuda.is_available()`` but
        both branches returned the identical CPU tensor; the dead branch
        is removed.  (``Variable`` is a no-op wrapper on modern PyTorch.)
        """
        return Variable(torch.zeros(batch_size, self.hidden_size))
def get_inputs():
    # One (batch, features) input plus a matching hidden state.
    return [torch.rand([4, 4]) for _ in range(2)]
def get_init_inputs():
    # All three layer sizes are 4; no positional constructor args.
    return [[], dict(input_size=4, hidden_size=4, output_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # torch.cat((a, b), dim=1) for two (4, 4) tensors -> one (4, 8) tensor.
    # Output columns 0-3 copy in_ptr0, columns 4-7 copy in_ptr1.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)  # unused lower bound left over from codegen
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # column belongs to the first input
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3  # column belongs to the second input
    tl.full([1], 8, tl.int64)  # unused upper bound left over from codegen
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
    """Compiled entry point for RNNNew.forward.

    args = [input (4,4), hidden (4,4), i2h.weight (4,8), i2h.bias (4,),
    i2o.weight (4,8), i2o.bias (4,)].  Concatenates input and hidden,
    then runs both linear layers as addmm.  Returns
    (i2o output, i2h output, concatenated features).
    """
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 8), (8, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        # buf0 = cat((primals_1, primals_2), dim=1)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # hidden = buf0 @ i2h.weight.T + i2h.bias
        extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
        del primals_3
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # output = buf0 @ i2o.weight.T + i2o.bias
        extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        del primals_6
    return buf2, buf1, buf0
class RNNNew(nn.Module):
    """Triton-compiled variant of RNN; forward delegates to ``call``."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNNNew, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)

    def initHidden(self, batch_size):
        """Return a zeroed (batch_size, hidden_size) initial hidden state.

        Fix: both branches of the original CUDA-availability check
        returned the identical CPU tensor, so the dead branch is removed.
        """
        return Variable(torch.zeros(batch_size, self.hidden_size))

    def forward(self, input_0, input_1):
        """One timestep via the fused kernel; returns (output, hidden)."""
        # Slot order must match call(): input, hidden, i2h params, i2o params.
        output = call([input_0, input_1, self.i2h.weight, self.i2h.bias,
            self.i2o.weight, self.i2o.bias])
        return output[0], output[1]
|
zhiyongc/Graph_Convolutional_LSTM
|
RNN
| false
| 16,810
|
[
"MIT"
] | 281
|
a703b63e626b1e2563fe3f45d9714e468b1d4a0e
|
https://github.com/zhiyongc/Graph_Convolutional_LSTM/tree/a703b63e626b1e2563fe3f45d9714e468b1d4a0e
|
CosineClassifier
|
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
def cosine_fully_connected_layer(x_in, weight, scale=None, bias=None,
    normalize_x=True, normalize_w=True):
    """Cosine-similarity linear layer.

    L2-normalises the rows of ``x_in`` (batch, in_features) and the
    columns of ``weight`` (in_features, out_features), multiplies them,
    then applies an optional per-output ``scale`` and ``bias``.
    """
    assert x_in.dim() == 2
    assert weight.dim() == 2
    assert x_in.size(1) == weight.size(0)
    # After normalisation the matmul yields cosine similarities.
    x_mat = F.normalize(x_in, p=2, dim=1, eps=1e-12) if normalize_x else x_in
    w_mat = F.normalize(weight, p=2, dim=0, eps=1e-12) if normalize_w else weight
    out = torch.mm(x_mat, w_mat)
    if scale is not None:
        out = out * scale.view(1, -1)
    if bias is not None:
        out = out + bias.view(1, -1)
    return out
class CosineClassifier(nn.Module):
    """Classifier scoring inputs by (optionally scaled) cosine similarity
    against one learned direction per class."""

    def __init__(self, num_channels, num_classes, scale=1.0, learn_scale=
        False, bias=False, normalize_x=True, normalize_w=True):
        super().__init__()
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.normalize_x = normalize_x
        self.normalize_w = normalize_w
        # He-style init: std = sqrt(2 / fan_in).
        weight = torch.FloatTensor(num_classes, num_channels).normal_(0.0,
            np.sqrt(2.0 / num_channels))
        self.weight = nn.Parameter(weight, requires_grad=True)
        if bias:
            bias = torch.FloatTensor(num_classes).fill_(0.0)
            self.bias = nn.Parameter(bias, requires_grad=True)
        else:
            self.bias = None
        # Scalar temperature; trainable only when learn_scale is True.
        scale_cls = torch.FloatTensor(1).fill_(scale)
        self.scale_cls = nn.Parameter(scale_cls, requires_grad=learn_scale)

    def forward(self, x_in):
        """Return (batch, num_classes) similarity logits for 2-D input."""
        assert x_in.dim() == 2
        # weight is stored (classes, channels); transpose for the matmul.
        return cosine_fully_connected_layer(x_in, self.weight.t(), scale=
            self.scale_cls, bias=self.bias, normalize_x=self.normalize_x,
            normalize_w=self.normalize_w)

    def extra_repr(self):
        # Fix: removed the dead str.format assignment to ``s`` that the
        # f-string below immediately overwrote.
        learnable = self.scale_cls.requires_grad
        s = (
            f'num_channels={self.num_channels}, num_classes={self.num_classes}, scale_cls={self.scale_cls.item()} (learnable={learnable}), normalize_x={self.normalize_x}, normalize_w={self.normalize_w}'
        )
        if self.bias is None:
            s += ', bias=False'
        return s
def get_inputs():
    # One 2-D feature batch, as required by CosineClassifier.forward.
    return [torch.rand(4, 4)]
def get_init_inputs():
    # Channel/class sizes passed as keywords; no positionals.
    return [[], dict(num_channels=4, num_classes=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Row-wise L2 normalisation of a contiguous (4, 4) matrix:
    # out[i, j] = x[i, j] / max(||x[i, :]||_2, 1e-12), matching
    # F.normalize(..., p=2, dim=1, eps=1e-12).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index of this element
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Reload the four row elements to accumulate the squared norm.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)  # eps floor from F.normalize
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place multiply of all 16 elements by the single scalar stored at
    # in_ptr0[0] (the scale_cls parameter in the caller).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 * tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
    """Compiled entry point for CosineClassifierNew.forward.

    args = [weight (4,4), input (4,4), scale_cls (1,)] per the module's
    forward.  Row-normalises args[0] into buf0 and args[1] into buf1
    (allocated with transposed strides, so the matmul reads it as the
    transpose), computes mm(buf0, buf1) and scales by the scalar.
    NOTE(review): with this slot order the product is
    mm(norm(weight), norm(input).T); confirm the weight/input slots match
    the eager module's mm(norm(x), norm(weight.t())).
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_1
        # Transposed-stride buffer: written row-major, read as its transpose.
        buf1 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
        triton_poi_fused_div_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, buf1, out=buf2)
        del buf1
        buf3 = buf2
        del buf2
        # Scale the similarities in place by the scalar scale_cls.
        triton_poi_fused_mul_1[grid(16)](buf3, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
    return buf3, primals_2, primals_3, buf0
def cosine_fully_connected_layer(x_in, weight, scale=None, bias=None,
    normalize_x=True, normalize_w=True):
    """Cosine linear layer: normalised matmul plus optional scale/bias.

    x_in is (batch, in_features); weight is (in_features, out_features).
    """
    assert x_in.dim() == 2
    assert weight.dim() == 2
    assert x_in.size(1) == weight.size(0)
    if normalize_x:
        # Unit-normalise each sample (row).
        x_in = F.normalize(x_in, p=2, dim=1, eps=1e-12)
    if normalize_w:
        # Unit-normalise each class direction (column).
        weight = F.normalize(weight, p=2, dim=0, eps=1e-12)
    similarities = torch.mm(x_in, weight)
    if scale is not None:
        similarities = similarities * scale.view(1, -1)
    if bias is not None:
        similarities = similarities + bias.view(1, -1)
    return similarities
class CosineClassifierNew(nn.Module):
    """Triton-compiled cosine-similarity classifier (see CosineClassifier)."""

    def __init__(self, num_channels, num_classes, scale=1.0, learn_scale=
        False, bias=False, normalize_x=True, normalize_w=True):
        super().__init__()
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.normalize_x = normalize_x
        self.normalize_w = normalize_w
        # He-style init: std = sqrt(2 / fan_in).
        weight = torch.FloatTensor(num_classes, num_channels).normal_(0.0,
            np.sqrt(2.0 / num_channels))
        self.weight = nn.Parameter(weight, requires_grad=True)
        if bias:
            bias = torch.FloatTensor(num_classes).fill_(0.0)
            self.bias = nn.Parameter(bias, requires_grad=True)
        else:
            self.bias = None
        # Scalar temperature; trainable only when learn_scale is True.
        scale_cls = torch.FloatTensor(1).fill_(scale)
        self.scale_cls = nn.Parameter(scale_cls, requires_grad=learn_scale)

    def extra_repr(self):
        # Fix: removed the dead str.format assignment to ``s`` that the
        # f-string below immediately overwrote.
        learnable = self.scale_cls.requires_grad
        s = (
            f'num_channels={self.num_channels}, num_classes={self.num_classes}, scale_cls={self.scale_cls.item()} (learnable={learnable}), normalize_x={self.normalize_x}, normalize_w={self.normalize_w}'
        )
        if self.bias is None:
            s += ', bias=False'
        return s

    def forward(self, input_0):
        # Slot order must match call(): weight, input, scale.
        output = call([self.weight, input_0, self.scale_cls])
        return output[0]
|
zheang01/FACT
|
CosineClassifier
| false
| 16,811
|
[
"MIT"
] | 65
|
a877cc86acc4d29fb7589c8ac571c8aef09e5fd8
|
https://github.com/zheang01/FACT/tree/a877cc86acc4d29fb7589c8ac571c8aef09e5fd8
|
GIoULoss
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def fp16_clamp(x, min=None, max=None):
    """Dtype-preserving clamp that routes CPU half tensors through float32.

    Half tensors on CPU are upcast for the clamp and cast back (CPU
    support for half ops is limited); everything else clamps directly.
    """
    if x.dtype == torch.float16 and not x.is_cuda:
        return x.float().clamp(min, max).half()
    return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
    """Calculate IoU / IoF / GIoU between two sets of bboxes.

    Boxes are (..., 4) in <x1, y1, x2, y2> format with a shared batch
    prefix.  With ``is_aligned=False`` the result pairs every box of
    ``bboxes1`` with every box of ``bboxes2`` and has shape (..., m, n);
    with ``is_aligned=True`` both sets must have equal length m and the
    result has shape (..., m).

    Args:
        bboxes1 (Tensor): shape (B, m, 4) or empty.
        bboxes2 (Tensor): shape (B, n, 4) or empty.
        mode (str): "iou" (intersection over union), "iof" (intersection
            over foreground, i.e. over area1) or "giou" (generalized IoU).
        is_aligned (bool): pair boxes one-to-one instead of all-to-all.
        eps (float): added to denominators for numerical stability.

    Returns:
        Tensor: overlaps of shape (..., m, n), or (..., m) when aligned.

    FP16 inputs stay in half precision (clamped via fp16_clamp) to halve
    peak memory; see https://github.com/open-mmlab/mmdetection/pull/4889.

    Example:
        >>> bboxes1 = torch.FloatTensor([[0, 0, 10, 10]])
        >>> bboxes2 = torch.FloatTensor([[0, 0, 10, 20]])
        >>> assert bbox_overlaps(bboxes1, bboxes2).shape == (1, 1)
    """
    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
    assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]
    rows = bboxes1.size(-2)
    cols = bboxes2.size(-2)
    if is_aligned:
        assert rows == cols
    if rows * cols == 0:
        # Degenerate empty input: return an empty result of the right shape.
        return bboxes1.new(
            batch_shape + ((rows,) if is_aligned else (rows, cols)))
    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
        bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
        bboxes2[..., 1])
    if is_aligned:
        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
        wh = fp16_clamp(rb - lt, min=0)
        overlap = wh[..., 0] * wh[..., 1]
        # IoF divides by the foreground area only.
        union = area1 + area2 - overlap if mode in ['iou', 'giou'] else area1
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        # Broadcast to every (m, n) pair.
        lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
        rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
        wh = fp16_clamp(rb - lt, min=0)
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
                None, :, :2])
            enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
                None, :, 2:])
    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = overlap / union
    if mode in ['iou', 'iof']:
        return ious
    # GIoU: subtract the enclosing-box area not covered by the union.
    enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = torch.max(enclose_area, eps)
    return ious - (enclose_area - union) / enclose_area
def giou_loss(pred, target, eps=1e-07):
    """`Generalized IoU <https://arxiv.org/abs/1902.09630>`_ loss: 1 - GIoU.

    Args:
        pred (torch.Tensor): Predicted (x1, y1, x2, y2) boxes, shape (n, 4).
        target (torch.Tensor): Aligned ground-truth boxes, shape (n, 4).
        eps (float): Stabiliser forwarded to :func:`bbox_overlaps`.
    Return:
        Tensor: Per-pair loss values, shape (n,).
    """
    return 1 - bbox_overlaps(pred, target, mode='giou', is_aligned=True,
        eps=eps)
class GIoULoss(nn.Module):
    """Module wrapper around :func:`giou_loss` with a configurable eps."""

    def __init__(self, eps=1e-06):
        super().__init__()
        self.eps = eps  # forwarded to bbox_overlaps' denominators

    def forward(self, pred, target):
        # Stateless apart from eps: delegate to the functional form.
        return giou_loss(pred, target, self.eps)
def get_inputs():
    # Predicted and ground-truth box tensors for the harness.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    # GIoULoss takes no constructor arguments.
    positional, keyword = [], {}
    return [positional, keyword]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused aligned GIoU loss: one lane per box pair, 64 pairs.  Each box is
    # 4 consecutive floats (x1, y1, x2, y2) at offset 4 * x0; in_ptr0 and
    # in_ptr1 are the two box tensors and 1 - giou is written to in_out_ptr0.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Intersection: clamp(min(x2)-max(x1), 0) * clamp(min(y2)-max(y1), 0).
    tmp2 = triton_helpers.minimum(tmp0, tmp1)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp2 - tmp5
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp11 - tmp14
    tmp16 = triton_helpers.maximum(tmp15, tmp7)
    tmp17 = tmp8 * tmp16
    # Union = area1 + area2 - intersection.
    tmp18 = tmp0 - tmp3
    tmp19 = tmp9 - tmp12
    tmp20 = tmp18 * tmp19
    tmp21 = tmp1 - tmp4
    tmp22 = tmp10 - tmp13
    tmp23 = tmp21 * tmp22
    tmp24 = tmp20 + tmp23
    tmp25 = tmp24 - tmp17
    # Smallest enclosing box area.
    tmp26 = triton_helpers.maximum(tmp0, tmp1)
    tmp27 = triton_helpers.minimum(tmp3, tmp4)
    tmp28 = tmp26 - tmp27
    tmp29 = triton_helpers.maximum(tmp28, tmp7)
    tmp30 = triton_helpers.maximum(tmp9, tmp10)
    tmp31 = triton_helpers.minimum(tmp12, tmp13)
    tmp32 = tmp30 - tmp31
    tmp33 = triton_helpers.maximum(tmp32, tmp7)
    tmp34 = tmp29 * tmp33
    # eps = 1e-6 rounded to float32; clamps both denominators from below.
    tmp35 = 9.999999974752427e-07
    tmp36 = triton_helpers.maximum(tmp34, tmp35)
    tmp37 = triton_helpers.maximum(tmp25, tmp35)
    tmp38 = tmp17 / tmp37  # iou
    tmp39 = tmp36 - tmp37
    tmp40 = tmp39 / tmp36  # (enclose - union) / enclose
    tmp41 = tmp38 - tmp40  # giou
    tmp42 = 1.0
    tmp43 = tmp42 - tmp41  # loss = 1 - giou
    tl.store(in_out_ptr0 + x0, tmp43, xmask)
def call(args):
    """Compiled GIoU-loss entry point.

    Takes two (4, 4, 4, 4) CUDA tensors whose last dimension holds
    (x1, y1, x2, y2) and returns a 1-tuple with the (4, 4, 4) per-box
    GIoU loss.
    """
    arg0_1, arg1_1 = args
    args.clear()
    # The kernel's flat indexing hard-codes these shapes and strides.
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # In/out buffer: the kernel writes the loss directly into buf3.
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf3 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0[grid(64)](
            buf3, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return buf3,
def fp16_clamp(x, min=None, max=None):
    """Clamp ``x`` to [min, max], preserving its dtype.

    CPU half tensors take a float32 round-trip (limited CPU half-op
    support); everything else clamps directly.
    """
    needs_upcast = x.dtype == torch.float16 and not x.is_cuda
    if needs_upcast:
        return x.float().clamp(min, max).half()
    return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
    """Compute IoU / IoF / GIoU between two sets of axis-aligned boxes.

    FP16 inputs are supported via :func:`fp16_clamp`, roughly halving the
    memory needed for the intermediate (m x n) tensors.

    Args:
        bboxes1 (Tensor): shape (B, m, 4) boxes in <x1, y1, x2, y2> form,
            or empty. ``B`` may be an arbitrary (possibly empty) batch
            prefix shared with ``bboxes2``.
        bboxes2 (Tensor): shape (B, n, 4) boxes in the same form, or empty.
        mode (str): 'iou' (intersection over union), 'iof' (intersection
            over foreground) or 'giou' (generalized IoU). Default 'iou'.
        is_aligned (bool, optional): If True, ``m`` and ``n`` must be equal
            and boxes are compared pairwise, giving shape (..., m);
            otherwise every pair is compared, giving (..., m, n).
            Default False.
        eps (float, optional): Added to denominators for numerical
            stability. Default 1e-6.

    Returns:
        Tensor: shape (..., m, n) if ``is_aligned`` is False, else (..., m).
    """
    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
    assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]
    rows = bboxes1.size(-2)
    cols = bboxes2.size(-2)
    if is_aligned:
        assert rows == cols
    if rows * cols == 0:
        # At least one side is empty: return an empty result of the
        # appropriate shape.
        out_shape = batch_shape + ((rows,) if is_aligned else (rows, cols))
        return bboxes1.new(out_shape)
    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
        bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
        bboxes2[..., 1])
    if is_aligned:
        top_left = torch.max(bboxes1[..., :2], bboxes2[..., :2])
        bottom_right = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
        wh = fp16_clamp(bottom_right - top_left, min=0)
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1 + area2 - overlap
        else:
            union = area1
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        # Broadcast to all (m, n) pairs via the inserted singleton axes.
        top_left = torch.max(bboxes1[..., :, None, :2],
            bboxes2[..., None, :, :2])
        bottom_right = torch.min(bboxes1[..., :, None, 2:],
            bboxes2[..., None, :, 2:])
        wh = fp16_clamp(bottom_right - top_left, min=0)
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :, None, :2],
                bboxes2[..., None, :, :2])
            enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
                bboxes2[..., None, :, 2:])
    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = overlap / union
    if mode in ['iou', 'iof']:
        return ious
    # GIoU: subtract the empty fraction of the smallest enclosing box.
    enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = torch.max(enclose_area, eps)
    return ious - (enclose_area - union) / enclose_area
def giou_loss(pred, target, eps=1e-07):
    """`Generalized Intersection over Union
    <https://arxiv.org/abs/1902.09630>`_ loss.

    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).

    Return:
        Tensor: per-box loss, ``1 - GIoU``.
    """
    return 1 - bbox_overlaps(pred, target, mode='giou', is_aligned=True,
        eps=eps)
class GIoULossNew(nn.Module):
    """GIoU loss module backed by the fused Triton kernel in ``call``."""

    def __init__(self, eps=1e-06):
        super(GIoULossNew, self).__init__()
        self.eps = eps

    def forward(self, input_0, input_1):
        # The compiled graph takes (pred, target) and returns a 1-tuple.
        return call([input_0, input_1])[0]
|
zhangzhengde0225/SwinTrack
|
GIoULoss
| false
| 16,812
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
BoundedIoULoss
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def bounded_iou_loss(pred, target, beta=0.2, eps=0.001):
    """BIoULoss.

    Implementation of `Improving Object Localization with Fitness NMS and
    Bounded IoU Loss <https://arxiv.org/abs/1711.00164>`_.

    Args:
        pred (torch.Tensor): Predicted bboxes in (x1, y1, x2, y2) columns.
        target (torch.Tensor): Target bboxes, same layout.
        beta (float): beta parameter of the smooth-L1 transform.
        eps (float): small constant guarding divisions.
    """
    px = (pred[:, 0] + pred[:, 2]) * 0.5
    py = (pred[:, 1] + pred[:, 3]) * 0.5
    pw = pred[:, 2] - pred[:, 0]
    ph = pred[:, 3] - pred[:, 1]
    with torch.no_grad():
        # Target geometry carries no gradient.
        tx = (target[:, 0] + target[:, 2]) * 0.5
        ty = (target[:, 1] + target[:, 3]) * 0.5
        tw = target[:, 2] - target[:, 0]
        th = target[:, 3] - target[:, 1]
    dx = tx - px
    dy = ty - py
    # Center-offset terms, clamped into [0, 1].
    loss_dx = 1 - torch.max((tw - 2 * dx.abs()) / (tw + 2 * dx.abs() + eps),
        torch.zeros_like(dx))
    loss_dy = 1 - torch.max((th - 2 * dy.abs()) / (th + 2 * dy.abs() + eps),
        torch.zeros_like(dy))
    # Size terms: symmetric width / height ratios.
    loss_dw = 1 - torch.min(tw / (pw + eps), pw / (tw + eps))
    loss_dh = 1 - torch.min(th / (ph + eps), ph / (th + eps))
    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1
        ).view(loss_dx.size(0), -1)
    # Quadratic below beta, linear above (smooth-L1 shape).
    return torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb /
        beta, loss_comb - 0.5 * beta)
class BoundedIoULoss(nn.Module):
    """Module wrapper around :func:`bounded_iou_loss`."""

    def __init__(self, beta=0.2, eps=0.001):
        super(BoundedIoULoss, self).__init__()
        self.beta = beta
        self.eps = eps

    def forward(self, pred, target):
        """Return the element-wise bounded IoU loss of ``pred`` vs ``target``."""
        return bounded_iou_loss(pred, target, self.beta, self.eps)
def get_inputs():
    """Sample forward inputs: two random (4, 4, 4, 4) tensors."""
    shape = (4, 4, 4, 4)
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor spec: no positional args and no keyword args."""
    init_args, init_kwargs = [], {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_lt_mul_stack_sub_where_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused bounded-IoU loss over 256 lanes.

    Lane index x0 in 0..3 selects which of the four stacked loss terms
    (dx, dy, dw, dh) of ``bounded_iou_loss`` the lane computes; the final
    ``tl.where`` chain then applies the smooth-L1 transform with beta=0.2
    (0.5*l*l/beta == l*0.5*l*5.0 and 0.5*beta == 0.1).

    Per the launch in ``call``, in_ptr0 holds the target boxes and
    in_ptr1 the predictions; in_out_ptr0 receives the loss values.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0 selects the loss component; x1/x2 address the box element.
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    # lane 0: loss_dx (horizontal centre-offset term).
    tmp5 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (x1 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = tmp6 + tmp5
    tmp9 = 0.5
    tmp10 = tmp8 * tmp9
    tmp11 = tl.load(in_ptr1 + (x1 + 64 * x2), tmp4 & xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp12 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp13 = tmp11 + tmp12
    tmp14 = tmp13 * tmp9
    tmp15 = tmp10 - tmp14
    tmp16 = tl_math.abs(tmp15)
    tmp17 = 2.0
    tmp18 = tmp16 * tmp17
    tmp19 = tmp7 - tmp18
    tmp20 = tmp7 + tmp18
    tmp21 = 0.001
    tmp22 = tmp20 + tmp21
    tmp23 = tmp19 / tmp22
    tmp24 = 0.0
    tmp25 = triton_helpers.maximum(tmp23, tmp24)
    tmp26 = 1.0
    tmp27 = tmp26 - tmp25
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp4, tmp27, tmp28)
    tmp30 = tmp0 >= tmp3
    tmp31 = tl.full([1], 2, tl.int64)
    tmp32 = tmp0 < tmp31
    tmp33 = tmp30 & tmp32
    # lane 1: loss_dy (vertical centre-offset term).
    tmp34 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), tmp33 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp35 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), tmp33 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp36 = tmp34 - tmp35
    tmp37 = tmp35 + tmp34
    tmp38 = tmp37 * tmp9
    tmp39 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), tmp33 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp40 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), tmp33 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp41 = tmp39 + tmp40
    tmp42 = tmp41 * tmp9
    tmp43 = tmp38 - tmp42
    tmp44 = tl_math.abs(tmp43)
    tmp45 = tmp44 * tmp17
    tmp46 = tmp36 - tmp45
    tmp47 = tmp36 + tmp45
    tmp48 = tmp47 + tmp21
    tmp49 = tmp46 / tmp48
    tmp50 = triton_helpers.maximum(tmp49, tmp24)
    tmp51 = tmp26 - tmp50
    tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
    tmp53 = tl.where(tmp33, tmp51, tmp52)
    tmp54 = tmp0 >= tmp31
    tmp55 = tl.full([1], 3, tl.int64)
    tmp56 = tmp0 < tmp55
    tmp57 = tmp54 & tmp56
    # lane 2: loss_dw (width-ratio term).
    tmp58 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), tmp57 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp59 = tl.load(in_ptr0 + (x1 + 64 * x2), tmp57 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp60 = tmp58 - tmp59
    tmp61 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), tmp57 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp62 = tl.load(in_ptr1 + (x1 + 64 * x2), tmp57 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp63 = tmp61 - tmp62
    tmp64 = tmp63 + tmp21
    tmp65 = tmp60 / tmp64
    tmp66 = tmp60 + tmp21
    tmp67 = tmp63 / tmp66
    tmp68 = triton_helpers.minimum(tmp65, tmp67)
    tmp69 = tmp26 - tmp68
    tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
    tmp71 = tl.where(tmp57, tmp69, tmp70)
    tmp72 = tmp0 >= tmp55
    tl.full([1], 4, tl.int64)
    # lane 3: loss_dh (height-ratio term).
    tmp75 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), tmp72 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp76 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), tmp72 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp77 = tmp75 - tmp76
    tmp78 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), tmp72 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp79 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), tmp72 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp80 = tmp78 - tmp79
    tmp81 = tmp80 + tmp21
    tmp82 = tmp77 / tmp81
    tmp83 = tmp77 + tmp21
    tmp84 = tmp80 / tmp83
    tmp85 = triton_helpers.minimum(tmp82, tmp84)
    tmp86 = tmp26 - tmp85
    tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
    tmp88 = tl.where(tmp72, tmp86, tmp87)
    # Select the active lane's value, then apply the smooth-L1 transform.
    tmp89 = tl.where(tmp57, tmp71, tmp88)
    tmp90 = tl.where(tmp33, tmp53, tmp89)
    tmp91 = tl.where(tmp4, tmp29, tmp90)
    tmp92 = 0.2
    tmp93 = tmp91 < tmp92
    tmp94 = tmp91 * tmp9
    tmp95 = tmp94 * tmp91
    tmp96 = 5.0
    tmp97 = tmp95 * tmp96
    tmp98 = 0.1
    tmp99 = tmp91 - tmp98
    tmp100 = tl.where(tmp93, tmp97, tmp99)
    tl.store(in_out_ptr0 + x3, tmp100, xmask)
def call(args):
    """Run the fused bounded-IoU-loss kernel; returns a (4, 64) buffer.

    ``args`` is (pred, target), both contiguous (4, 4, 4, 4) CUDA tensors;
    the list is cleared in place.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        scratch = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1),
            torch.float32)
        # View the 4-D scratch buffer in the (4, 64) output layout.
        out = reinterpret_tensor(scratch, (4, 64), (64, 1), 0)
        del scratch
        get_raw_stream(0)
        # NOTE: the kernel takes (out, target, pred) in that order.
        triton_poi_fused_div_lt_mul_stack_sub_where_0[grid(256)](out,
            arg1_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return out,
def bounded_iou_loss(pred, target, beta=0.2, eps=0.001):
    """BIoULoss.

    See `Improving Object Localization with Fitness NMS and Bounded IoU
    Loss <https://arxiv.org/abs/1711.00164>`_.

    Args:
        pred (torch.Tensor): Predicted bboxes.
        target (torch.Tensor): Target bboxes.
        beta (float): beta parameter in smoothl1.
        eps (float): eps to avoid NaN.
    """
    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
    pred_w = pred[:, 2] - pred[:, 0]
    pred_h = pred[:, 3] - pred[:, 1]
    with torch.no_grad():
        # Target geometry is detached from the graph.
        target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
        target_ctry = (target[:, 1] + target[:, 3]) * 0.5
        target_w = target[:, 2] - target[:, 0]
        target_h = target[:, 3] - target[:, 1]
    dx = target_ctrx - pred_ctrx
    dy = target_ctry - pred_ctry
    abs_dx2 = 2 * dx.abs()
    abs_dy2 = 2 * dy.abs()
    loss_dx = 1 - torch.max((target_w - abs_dx2) / (target_w + abs_dx2 +
        eps), torch.zeros_like(dx))
    loss_dy = 1 - torch.max((target_h - abs_dy2) / (target_h + abs_dy2 +
        eps), torch.zeros_like(dy))
    loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w +
        eps))
    loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h +
        eps))
    stacked = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1)
    loss_comb = stacked.view(stacked.size(0), -1)
    # Quadratic below beta, linear above (smooth-L1 shape).
    return torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb /
        beta, loss_comb - 0.5 * beta)
class BoundedIoULossNew(nn.Module):
    """Bounded IoU loss driven by the fused Triton kernel via ``call``.

    NOTE(review): beta/eps are baked into the compiled kernel; the
    attributes are kept only for interface compatibility.
    """

    def __init__(self, beta=0.2, eps=0.001):
        super(BoundedIoULossNew, self).__init__()
        self.beta = beta
        self.eps = eps

    def forward(self, input_0, input_1):
        # ``call`` returns a 1-tuple with the (4, 64) loss buffer.
        return call([input_0, input_1])[0]
|
zhangzhengde0225/SwinTrack
|
BoundedIoULoss
| false
| 16,813
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
Attention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from queue import *
from math import *
class Attention(nn.Module):
    """Additive (concat) attention over an encoder context."""

    def __init__(self, hidden_size):
        super(Attention, self).__init__()
        self.attn = nn.Linear(hidden_size * 2, hidden_size)
        self.v = nn.Parameter(torch.randn(hidden_size))
        bound = 1.0 / math.sqrt(self.v.size(0))
        self.v.data.uniform_(-bound, bound)

    def forward(self, hidden, context):
        """
        hidden: [batch, hidden_size]
        context: [seq, batch, hidden_size]
        return the context vector for decoding: [batch, hidden]
        """
        seq_len = context.shape[0]
        # Tile the decoder state across time and move batch first.
        queries = hidden.repeat(seq_len, 1, 1).transpose(0, 1)
        keys = context.transpose(0, 1)
        weights = F.softmax(self.score(queries, keys), dim=1).unsqueeze(1)
        return torch.bmm(weights, keys).squeeze(1)

    def score(self, hidden, context):
        """
        hidden: [batch, seq, hidden]
        context: [batch, seq, hidden]
        """
        combined = torch.cat([hidden, context], 2)
        energy = torch.tanh(self.attn(combined)).transpose(1, 2)
        v = self.v.repeat(context.shape[0], 1).unsqueeze(1)
        return torch.bmm(v, energy).squeeze(1)
def get_inputs():
    """Sample forward inputs: hidden (4, 4) and context (4, 4, 4)."""
    return [torch.rand(4, 4), torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor spec: no positional args, ``hidden_size=4``."""
    return [[], dict(hidden_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
from queue import *
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Concatenate the tiled decoder state and the context along the last
    dim, filling the (4, 4, 8) input buffer of the attention linear layer.

    Lanes with x0 < 4 copy from in_ptr0, the rest from in_ptr1 (per the
    launch in ``call``, in_ptr0 is the hidden state and in_ptr1 the
    context).
    """
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x2 = xindex // 32
    x1 = xindex // 8 % 4
    x3 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    # First half of the feature axis: hidden state.
    tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    # Second half: context, offset by -4 on the feature axis.
    tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place add of a (4,) vector followed by tanh over 64 elements.

    in_out_ptr0 holds the linear-layer matmul output; in_ptr0 is indexed
    per feature (x0 = xindex % 4).
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Tile a length-4 vector into a (4, 4) buffer: out[i, j] = in[j]."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Softmax pass 1 over rows of a (4, 4) buffer: exp(x - rowmax).

    Subtracting the per-row maximum keeps the exponentials numerically
    stable; normalization happens in the companion pass-2 kernel.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Load the full row (4 values) to compute its maximum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Softmax pass 2: divide each exponential by its row sum."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Load the full row (4 values) to compute its sum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    """Compiled forward of ``Attention``.

    args: (context (4,4,4), hidden (4,4), attn weight (4,8), and two
    (4,)-shaped parameter vectors) -- cleared in place. Returns the
    attended context plus intermediates kept for autograd.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: concat([tiled hidden, context], dim=2) -> (4, 4, 8)
        buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # buf1: (16, 8) @ (8, 4) linear projection (vector add fused later).
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
            reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
        del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        # buf2: tanh(buf1 + primals_4), in place.
        triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_4
        # buf3: primals_5 tiled to (4, 4).
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_repeat_2[grid(16)](primals_5, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_5
        # buf4: attention energies via batched (1, 4) x (4, 4) matmuls.
        buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0
            ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4)
        # buf5/buf6: two-pass numerically stable softmax over the energies.
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(16)](buf5, buf6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # buf5's storage is reused for the final weighted context.
        buf7 = reinterpret_tensor(buf5, (4, 1, 4), (4, 4, 1), 0)
        del buf5
        extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 0, 1), 0
            ), reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out
            =buf7)
        del buf6
    return reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(buf0
        , (16, 8), (8, 1), 0), buf2, buf4, reinterpret_tensor(primals_1, (4,
        4, 4), (4, 1, 16), 0), reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0
        )
class AttentionNew(nn.Module):
    """Triton-compiled variant of ``Attention``; same parameters."""

    def __init__(self, hidden_size):
        super(AttentionNew, self).__init__()
        self.attn = nn.Linear(hidden_size * 2, hidden_size)
        self.v = nn.Parameter(torch.randn(hidden_size))
        bound = 1.0 / math.sqrt(self.v.size(0))
        self.v.data.uniform_(-bound, bound)

    def score(self, hidden, context):
        """
        hidden: [batch, seq, hidden]
        context: [batch, seq, hidden]
        """
        combined = torch.cat([hidden, context], 2)
        energy = torch.tanh(self.attn(combined)).transpose(1, 2)
        v = self.v.repeat(context.shape[0], 1).unsqueeze(1)
        return torch.bmm(v, energy).squeeze(1)

    def forward(self, input_0, input_1):
        # Argument ordering expected by the compiled ``call`` graph:
        # (context, hidden, weight, v, bias).
        return call([input_1, input_0, self.attn.weight, self.v,
            self.attn.bias])[0]
|
zhongerqiandan/OpenDialog
|
Attention
| false
| 16,814
|
[
"MIT"
] | 98
|
f478b2a912c8c742da5ced510ac40da59217ddb3
|
https://github.com/zhongerqiandan/OpenDialog/tree/f478b2a912c8c742da5ced510ac40da59217ddb3
|
segmentation_layer
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class segmentation_layer(nn.Module):
    """1x1 conv head mapping 32 feature channels to ``args.snumclass`` logits."""

    def __init__(self, args):
        super(segmentation_layer, self).__init__()
        self.segm_layer = nn.Conv2d(32, args.snumclass, kernel_size=1)

    def forward(self, featMap):
        """Return per-pixel class logits with the spatial size of ``featMap``."""
        return self.segm_layer(featMap)
def get_inputs():
    """Sample forward input: one random (4, 32, 64, 64) feature map."""
    return [torch.rand(4, 32, 64, 64)]
def get_init_inputs():
    """Constructor spec: an ``args`` namespace carrying ``snumclass=4``."""
    cfg = _mock_config(snumclass=4)
    return [[], {'args': cfg}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    """In-place per-channel bias add after the 1x1 convolution.

    x1 = (xindex // 4096) % 4 is the channel index of a (4, 4, 64, 64)
    NCHW buffer; each element gets in_ptr0[channel] added.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
    """Run the 1x1 convolution via an extern kernel, then fuse the bias add.

    ``args`` is (weight, bias, feature map); the list is cleared in place.
    Returns (logits, weight, input) -- the latter two are kept for backward.
    """
    weight, bias, feat = args
    args.clear()
    assert_size_stride(weight, (4, 32, 1, 1), (32, 1, 1, 1))
    assert_size_stride(bias, (4,), (1,))
    assert_size_stride(feat, (4, 32, 64, 64), (131072, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        conv_out = extern_kernels.convolution(feat, weight, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(conv_out, (4, 4, 64, 64), (16384, 4096, 64, 1))
        get_raw_stream(0)
        # Add the per-channel bias in place over all 4*4*64*64 elements.
        triton_poi_fused_convolution_0[grid(65536)](conv_out, bias, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        del bias
    return conv_out, weight, feat
class segmentation_layerNew(nn.Module):
    """Triton-compiled segmentation head; weights live in ``segm_layer``."""

    def __init__(self, args):
        super(segmentation_layerNew, self).__init__()
        self.segm_layer = nn.Conv2d(32, args.snumclass, kernel_size=1)

    def forward(self, input_0):
        weight = self.segm_layer.weight
        bias = self.segm_layer.bias
        # ``call`` returns (logits, weight, input); expose only the logits.
        return call([weight, bias, input_0])[0]
|
zhenpeiyang/RelativePose
|
segmentation_layer
| false
| 16,815
|
[
"BSD-3-Clause"
] | 144
|
2e9fdf5003c5952cf610f8c6d891519b9e9e014b
|
https://github.com/zhenpeiyang/RelativePose/tree/2e9fdf5003c5952cf610f8c6d891519b9e9e014b
|
MyUpsample2
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class MyUpsample2(nn.Module):
    """Nearest-neighbour 2x spatial upsampling for NCHW tensors."""

    def forward(self, x):
        """Duplicate every pixel into a 2x2 block, doubling H and W."""
        n, c, h, w = x.size(0), x.size(1), x.size(2), x.size(3)
        expanded = x[:, :, :, None, :, None].expand(-1, -1, -1, 2, -1, 2)
        return expanded.reshape(n, c, h * 2, w * 2)
def get_inputs():
    """Sample forward input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor spec: no args, no kwargs."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Materialize the 2x nearest-neighbour upsampling gather.

    Each of the 1024 output elements reads in_ptr0[x1 + 4 * x3], where
    x1 = (xindex // 2) % 4 drops the duplicated inner index -- i.e. each
    input value lands in a 2x2 block of the expanded output view.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 2 % 4
    x3 = xindex // 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
    """Materialize the upsampled copy and return it as a (4, 4, 8, 8) view."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 6-D layout mirrors x[:, :, :, None, :, None].expand(..., 2, ..., 2).
        expanded = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8,
            2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(1024)](arg0_1, expanded, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(expanded, (4, 4, 8, 8), (256, 64, 8, 1), 0),
class MyUpsample2New(nn.Module):
    """Triton-compiled 2x nearest-neighbour upsampling."""

    def forward(self, input_0):
        # ``call`` returns a 1-tuple holding the (4, 4, 8, 8) result.
        return call([input_0])[0]
|
zigonk/ReSC
|
MyUpsample2
| false
| 16,816
|
[
"MIT"
] | 57
|
c816365b0410f521974060ef0cc6eaa1dd09b63a
|
https://github.com/zigonk/ReSC/tree/c816365b0410f521974060ef0cc6eaa1dd09b63a
|
BCEFocalLoss
|
import torch
class BCEFocalLoss(torch.nn.Module):
    """Binary focal loss with a fixed alpha, applied to raw logits."""

    def __init__(self, gamma=2, alpha=0.25, reduction='sum', loss_weight=1.0):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, _input, target):
        """Compute the focal loss of logits ``_input`` against ``target``."""
        pt = torch.sigmoid(_input)
        alpha = self.alpha
        pos_term = -alpha * (1 - pt) ** self.gamma * target * torch.log(pt)
        neg_term = (1 - alpha) * pt ** self.gamma * (1 - target) * torch.log(
            1 - pt)
        loss = pos_term - neg_term
        if self.reduction == 'elementwise_mean':
            loss = torch.mean(loss)
        elif self.reduction == 'sum':
            loss = torch.sum(loss)
        # NOTE(review): the constant 54 divisor is kept from the original;
        # presumably a dataset-specific normalizer -- confirm upstream.
        return loss * self.loss_weight / 54
def get_inputs():
    """Sample forward inputs: logits and targets, both (4, 4, 4, 4)."""
    shape = (4, 4, 4, 4)
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor spec: default constructor, no args."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    """Single-block reduction computing the summed BCE focal loss.

    Per element: pt = sigmoid(logit);
    loss = -0.25*(1-pt)^2*t*log(pt) - 0.75*pt^2*(1-t)*log(1-pt),
    summed over 256 elements and scaled by 1/54
    (0.018518518518518517 == 1/54). in_ptr0: logits, in_ptr1: targets;
    the scalar result lands in in_out_ptr0.
    """
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp7 = tl.load(in_ptr1 + r0, None)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = 1.0
    tmp3 = tmp2 - tmp1
    # Positive-class term: -alpha * (1 - pt)^gamma * t * log(pt).
    tmp4 = tmp3 * tmp3
    tmp5 = -0.25
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 * tmp7
    tmp9 = tl_math.log(tmp1)
    tmp10 = tmp8 * tmp9
    # Negative-class term: (1 - alpha) * pt^gamma * (1 - t) * log(1 - pt).
    tmp11 = tmp1 * tmp1
    tmp12 = 0.75
    tmp13 = tmp11 * tmp12
    tmp14 = tmp2 - tmp7
    tmp15 = tmp13 * tmp14
    tmp16 = tl_math.log(tmp3)
    tmp17 = tmp15 * tmp16
    tmp18 = tmp10 - tmp17
    tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
    tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
    tmp22 = tmp21 * tmp2
    tmp23 = 0.018518518518518517
    tmp24 = tmp22 * tmp23
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
    """Run the fused focal-loss reduction; returns a 0-d scalar buffer."""
    logits, targets = args
    args.clear()
    assert_size_stride(logits, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(targets, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        out = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        # Single program reduces all 256 elements into the scalar output.
        triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0[grid(1)](out,
            logits, targets, 1, 256, num_warps=2, num_stages=1)
        del logits
        del targets
    return out,
class BCEFocalLossNew(torch.nn.Module):
    """Triton-compiled binary focal loss (fixed alpha, sum reduction).

    NOTE(review): gamma/alpha/reduction/loss_weight are baked into the
    compiled kernel; the attributes are kept only for interface
    compatibility with ``BCEFocalLoss``.
    """

    def __init__(self, gamma=2, alpha=0.25, reduction='sum', loss_weight=1.0):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        # ``call`` returns a 1-tuple holding the scalar loss.
        return call([input_0, input_1])[0]
|
zhiqi-li/Panoptic-SegFormer
|
BCEFocalLoss
| false
| 16,817
|
[
"Apache-2.0"
] | 97
|
cdb9b68059e9ef825a3f7079c37aa835b1711227
|
https://github.com/zhiqi-li/Panoptic-SegFormer/tree/cdb9b68059e9ef825a3f7079c37aa835b1711227
|
LAM_Gconv
|
import torch
import torch.nn as nn
class LAM_Gconv(nn.Module):
    """Graph convolution: X' = act(L(A_hat) @ X @ W), with a symmetrically
    normalized adjacency as the propagation matrix."""

    def __init__(self, in_features, out_features, activation=nn.ReLU(
        inplace=True)):
        super(LAM_Gconv, self).__init__()
        self.fc = nn.Linear(in_features=in_features, out_features=out_features)
        self.activation = activation

    def laplacian(self, A_hat):
        """Normalize a single adjacency matrix: D^-1/2-style scaling."""
        deg_inv_sqrt = (torch.sum(A_hat, 0) + 1e-05) ** -0.5
        return deg_inv_sqrt * A_hat * deg_inv_sqrt

    def laplacian_batch(self, A_hat):
        """Normalize a batch of adjacency matrices along both axes."""
        batch, N = A_hat.shape[:2]
        deg_inv_sqrt = (torch.sum(A_hat, 1) + 1e-05) ** -0.5
        return deg_inv_sqrt.view(batch, N, 1) * A_hat * deg_inv_sqrt.view(
            batch, 1, N)

    def forward(self, X, A):
        """Propagate X through the normalized graph A, then apply fc/act."""
        batch = X.size(0)
        A_hat = A.unsqueeze(0).repeat(batch, 1, 1)
        out = self.fc(torch.bmm(self.laplacian_batch(A_hat), X))
        return out if self.activation is None else self.activation(out)
def get_inputs():
    """Sample forward inputs: node features (4, 4, 4) and adjacency (4, 4)."""
    return [torch.rand(4, 4, 4), torch.rand(4, 4)]
def get_init_inputs():
    """Constructor spec: in_features=4, out_features=4 as keywords."""
    return [[], dict(in_features=4, out_features=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Fused repeat + symmetric normalization of a 4x4 matrix.

    Writes 64 elements -- a batch of 4 identical (4, 4) matrices, each entry
    being d(x1)^-0.5 * in[x3] * d(x0)^-0.5 where d(j) = (column-sum j) + 1e-5.
    This inlines ``laplacian_batch`` applied to a repeated adjacency.
    NOTE(review): ``in_ptr0`` is args[1] of `call` (packed as fc.weight by
    the wrapper) -- verify the mapping against the tracer output.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x1/x0: output row/column within a (4, 4) tile; x3: flat index into the
    # source matrix (same for every batch copy); x5: flat output index.
    x1 = xindex // 4 % 4
    x3 = xindex % 16
    x0 = xindex % 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    # tmp6: sum of column x1; +1e-5 then ** -0.5 gives the left D^-1/2 factor.
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = -0.5
    tmp10 = libdevice.pow(tmp8, tmp9)
    tmp12 = tmp10 * tmp11
    # tmp19: sum of column x0 -> right D^-1/2 factor.
    tmp15 = tmp13 + tmp14
    tmp17 = tmp15 + tmp16
    tmp19 = tmp17 + tmp18
    tmp20 = tmp19 + tmp7
    tmp21 = libdevice.pow(tmp20, tmp9)
    tmp22 = tmp12 * tmp21
    tl.store(out_ptr0 + x5, tmp22, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """In-place bias add + ReLU; also stores the (result <= 0) mask.

    ``in_out_ptr0`` holds the pre-bias linear output and is overwritten with
    relu(x + bias); ``out_ptr0`` receives the boolean mask autograd uses in
    the ReLU backward pass.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index: one bias element per output feature
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor-compiled LAM_Gconv forward.

    args = [primals_1 (4,4,4), primals_2 (4,4), primals_3 (4,4),
    primals_4 (4,)] in the positional order packed by LAM_GconvNew.forward.
    Returns (activated output, reshaped bmm result, relu backward mask).
    NOTE(review): buf0 normalizes primals_2 (packed as fc.weight) while
    primals_3 (packed as the adjacency input) is used as the mm weight --
    confirm this mapping against the tracer that generated this wrapper.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: batched normalized matrix (repeat + D^-1/2 scaling fused).
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_repeat_0[grid(64)](primals_2, buf0, 64, XBLOCK
            =64, num_warps=1, num_stages=1)
        del primals_2
        # buf1: graph propagation L @ X.
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf0, primals_1, out=buf1)
        del primals_1
        buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
        del buf0
        # buf2: linear layer (weight transposed via reinterpret).
        extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
        del primals_3
        buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
        del buf2
        # buf3: bias + ReLU in place; buf4: backward mask.
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(64)](buf3,
            primals_4, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
    return buf3, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), buf4
class LAM_GconvNew(nn.Module):
    """Triton-compiled variant of LAM_Gconv; forward delegates to `call`."""

    def __init__(self, in_features, out_features, activation=nn.ReLU(
        inplace=True)):
        super(LAM_GconvNew, self).__init__()
        self.fc = nn.Linear(in_features=in_features, out_features=out_features)
        self.activation = activation

    def laplacian(self, A_hat):
        """Symmetrically normalize a single (N, N) adjacency matrix."""
        inv_sqrt_degree = (torch.sum(A_hat, 0) + 1e-05) ** -0.5
        return inv_sqrt_degree * A_hat * inv_sqrt_degree

    def laplacian_batch(self, A_hat):
        """Symmetrically normalize a batch of (N, N) adjacency matrices."""
        batch, N = A_hat.shape[:2]
        inv_sqrt_degree = (torch.sum(A_hat, 1) + 1e-05) ** -0.5
        left = inv_sqrt_degree.view(batch, N, 1)
        right = inv_sqrt_degree.view(batch, 1, N)
        return left * A_hat * right

    def forward(self, input_0, input_1):
        # Positional packing expected by the generated `call`:
        # (input_0, fc.weight, input_1, fc.bias).
        outputs = call([input_0, self.fc.weight, input_1, self.fc.bias])
        return outputs[0]
|
zhaoweixi/GraFormer
|
LAM_Gconv
| false
| 16,818
|
[
"BSD-2-Clause"
] | 384
|
0a0a04014cdf157c11ab8e952862efa27c6a1980
|
https://github.com/zhaoweixi/GraFormer/tree/0a0a04014cdf157c11ab8e952862efa27c6a1980
|
IRHead
|
import torch
import torch.nn as nn
from queue import *
from math import *
class IRHead(nn.Module):
    """Bilinear interaction head scoring (source, target) embedding pairs."""

    def __init__(self, hidden_size, dropout=0.5):
        super(IRHead, self).__init__()
        self.M = nn.Parameter(torch.randn(hidden_size, hidden_size))
        self.hidden_layer = nn.Linear(hidden_size * 2 + 1, hidden_size)
        self.opt_layer = nn.Linear(hidden_size, 2)
        self.hidden_drop = nn.Dropout(p=dropout)

    def forward(self, src_embed, tgt_embed):
        """
        src_embed: [batch, hidden]
        tgt_embed: [batch, hidden]
        return the score: [batch, 2]
        """
        # Bilinear match score src^T M tgt -> [batch, 1].
        projected = torch.matmul(src_embed.unsqueeze(1), self.M)
        match = torch.bmm(projected, tgt_embed.unsqueeze(2)).squeeze(2)
        # Classify on the concatenation [src ; score ; tgt].
        features = torch.cat([src_embed, match, tgt_embed], 1)
        hidden = self.hidden_drop(torch.tanh(self.hidden_layer(features)))
        return self.opt_layer(hidden)
def get_inputs():
    """Two (4, 4) embedding batches for IRHead.forward."""
    return [torch.rand([4, 4]) for _ in range(2)]
def get_init_inputs():
    """Constructor (args, kwargs) used by test harnesses to build IRHead."""
    return [[], dict(hidden_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from queue import *
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Fused torch.cat([src (4 cols), score (1 col), tgt (4 cols)], dim=1).

    Produces a (4, 9) tensor: columns 0-3 come from in_ptr0, column 4 from
    in_ptr1 (the bilinear score), columns 5-8 from in_ptr2.
    """
    xnumel = 36
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 9   # output column
    x1 = xindex // 9  # batch row
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # columns 0-3: src embedding
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 5, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # column 4: scalar score
    tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7  # columns 5-8: tgt embedding
    tl.full([1], 9, tl.int64)
    tmp14 = tl.load(in_ptr2 + (4 * x1 + (-5 + x0)), tmp11 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp15 = tl.where(tmp9, tmp10, tmp14)
    tmp16 = tl.where(tmp4, tmp5, tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias add + tanh: in_out_ptr0 <- tanh(in_out_ptr0 + bias)."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index: one bias element per output feature
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
    """Inductor-compiled IRHead forward.

    args are packed positionally by IRHeadNew.forward as
    (M, input_0, input_1, hidden_layer.weight, hidden_layer.bias,
    opt_layer.weight, opt_layer.bias). Returns the (4, 2) logits first,
    followed by tensors saved for the backward pass.
    NOTE(review): the mm/bmm operand order differs from the eager code's
    src @ M @ tgt formulation -- presumably an equivalent reassociation by
    the tracer; verify against the generated graph.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 9), (9, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (2, 4), (4, 1))
    assert_size_stride(primals_7, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, primals_3, out=buf0)
        del primals_3
        # buf1: per-row bilinear score (4, 1, 1).
        buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0
            ), reinterpret_tensor(primals_2, (4, 4, 1), (4, 1, 1), 0), out=buf1
            )
        # buf2: fused concat [src, score, tgt] -> (4, 9).
        buf2 = empty_strided_cuda((4, 9), (9, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(36)](primals_1, buf1, primals_2, buf2,
            36, XBLOCK=64, num_warps=1, num_stages=1)
        del buf1
        buf3 = buf0
        del buf0
        # buf3/buf4: hidden layer then bias + tanh in place.
        extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (9, 4), (1, 9
            ), 0), out=buf3)
        buf4 = buf3
        del buf3
        triton_poi_fused_tanh_1[grid(16)](buf4, primals_5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_5
        # buf5: output layer (addmm fuses the bias).
        buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
            (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf5)
        del primals_7
    return buf5, buf2, buf4, primals_6, primals_4, reinterpret_tensor(primals_2
        , (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (
        1, 4), 0)
class IRHeadNew(nn.Module):
    """Triton-compiled variant of IRHead; forward delegates to `call`."""

    def __init__(self, hidden_size, dropout=0.5):
        super(IRHeadNew, self).__init__()
        self.M = nn.Parameter(torch.randn(hidden_size, hidden_size))
        self.hidden_layer = nn.Linear(hidden_size * 2 + 1, hidden_size)
        self.opt_layer = nn.Linear(hidden_size, 2)
        self.hidden_drop = nn.Dropout(p=dropout)

    def forward(self, input_0, input_1):
        # Positional packing expected by the generated `call`:
        # (M, src, tgt, hidden W, hidden b, output W, output b).
        packed = [self.M, input_0, input_1, self.hidden_layer.weight,
            self.hidden_layer.bias, self.opt_layer.weight,
            self.opt_layer.bias]
        return call(packed)[0]
|
zhongerqiandan/OpenDialog
|
IRHead
| false
| 16,819
|
[
"MIT"
] | 98
|
f478b2a912c8c742da5ced510ac40da59217ddb3
|
https://github.com/zhongerqiandan/OpenDialog/tree/f478b2a912c8c742da5ced510ac40da59217ddb3
|
DenseBlock
|
import torch
import torch.nn as nn
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
    """Kaiming-initialize conv/linear weights (scaled), zero biases,
    and set BatchNorm2d layers to identity (weight=1, bias=0)."""
    nets = net_l if isinstance(net_l, list) else [net_l]
    for net in nets:
        for m in net.modules():
            # Conv2d and Linear get identical treatment.
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                init.kaiming_normal_(m.weight, a=0, mode='fan_in')
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias.data, 0.0)
def initialize_weights_xavier(net_l, scale=1):
    """Xavier-initialize conv/linear weights (scaled), zero biases,
    and set BatchNorm2d layers to identity (weight=1, bias=0)."""
    nets = net_l if isinstance(net_l, list) else [net_l]
    for net in nets:
        for m in net.modules():
            # Conv2d and Linear get identical treatment.
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                init.xavier_normal_(m.weight)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias.data, 0.0)
class DenseBlock(nn.Module):
    """Five-conv dense block: every conv sees the input concatenated with
    all previously produced feature maps (RDB-style); the last conv has no
    activation and is zero-initialized."""

    def __init__(self, channel_in, channel_out, init='xavier', gc=32, bias=True
        ):
        super(DenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(channel_in + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(channel_in + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(channel_in + 3 * gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(channel_in + 4 * gc, channel_out, 3, 1, 1,
            bias=bias)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        # First four convs get a small (0.1x) init; conv5 starts at zero.
        initializer = (initialize_weights_xavier if init == 'xavier' else
            initialize_weights)
        initializer([self.conv1, self.conv2, self.conv3, self.conv4], 0.1)
        initialize_weights(self.conv5, 0)

    def forward(self, x):
        features = [x]
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            features.append(self.lrelu(conv(torch.cat(features, 1))))
        return self.conv5(torch.cat(features, 1))
def get_inputs():
    """A single (4, 4, 4, 4) image batch for DenseBlock.forward."""
    batch = torch.rand([4, 4, 4, 4])
    return [batch]
def get_init_inputs():
    """Constructor (args, kwargs) used by test harnesses to build DenseBlock."""
    return [[], dict(channel_in=4, channel_out=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Fused cat((x, lrelu(conv1(x) + b1)), dim=1) -> (4, 36, 4, 4).

    Channels 0-3 copy the block input; channels 4-35 apply bias add +
    LeakyReLU(0.2) to the raw conv1 output (in_ptr1 with bias in_ptr2).
    """
    xnumel = 2304
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 36  # output channel
    x0 = xindex % 16        # spatial position (4x4)
    x2 = xindex // 576      # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # channels 0-3: passthrough of x
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 36, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
        'evict_last', other=0.0)
    # bias add + LeakyReLU(negative_slope=0.2)
    tmp11 = tmp9 + tmp10
    tmp12 = 0.0
    tmp13 = tmp11 > tmp12
    tmp14 = 0.2
    tmp15 = tmp11 * tmp14
    tmp16 = tl.where(tmp13, tmp11, tmp15)
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp6, tmp16, tmp17)
    tmp19 = tl.where(tmp4, tmp5, tmp18)
    tl.store(out_ptr0 + x3, tmp19, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Fused cat((x, lrelu(conv1), lrelu(conv2)), dim=1) -> (4, 68, 4, 4).

    Channels 0-3 copy x; 4-35 and 36-67 apply bias add + LeakyReLU(0.2) to
    the raw conv1 (in_ptr1/in_ptr2) and conv2 (in_ptr3/in_ptr4) outputs.
    """
    xnumel = 4352
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 68  # output channel
    x0 = xindex % 16        # spatial position (4x4)
    x2 = xindex // 1088     # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # channels 0-3: passthrough of x
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 36, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # channels 4-35: conv1 branch
    tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp9 &
        xmask, other=0.0)
    tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp10 + tmp11
    tmp13 = 0.0
    tmp14 = tmp12 > tmp13
    tmp15 = 0.2
    tmp16 = tmp12 * tmp15
    tmp17 = tl.where(tmp14, tmp12, tmp16)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp9, tmp17, tmp18)
    tmp20 = tmp0 >= tmp7  # channels 36-67: conv2 branch
    tl.full([1], 68, tl.int64)
    tmp23 = tl.load(in_ptr3 + (x0 + 16 * (-36 + x1) + 512 * x2), tmp20 &
        xmask, other=0.0)
    tmp24 = tl.load(in_ptr4 + (-36 + x1), tmp20 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp25 = tmp23 + tmp24
    tmp26 = tmp25 > tmp13
    tmp27 = tmp25 * tmp15
    tmp28 = tl.where(tmp26, tmp25, tmp27)
    tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
    tmp30 = tl.where(tmp20, tmp28, tmp29)
    tmp31 = tl.where(tmp9, tmp19, tmp30)
    tmp32 = tl.where(tmp4, tmp5, tmp31)
    tl.store(out_ptr0 + x3, tmp32, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Fused cat((x, lrelu(conv1), lrelu(conv2), lrelu(conv3)), dim=1)
    -> (4, 100, 4, 4). Each conv branch gets bias add + LeakyReLU(0.2);
    pointer pairs are (raw conv output, bias) per branch.
    """
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 100  # output channel
    x0 = xindex % 16         # spatial position (4x4)
    x2 = xindex // 1600      # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # channels 0-3: passthrough of x
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 36, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # channels 4-35: conv1 branch
    tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp9 &
        xmask, other=0.0)
    tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp10 + tmp11
    tmp13 = 0.0
    tmp14 = tmp12 > tmp13
    tmp15 = 0.2
    tmp16 = tmp12 * tmp15
    tmp17 = tl.where(tmp14, tmp12, tmp16)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp9, tmp17, tmp18)
    tmp20 = tmp0 >= tmp7
    tmp21 = tl.full([1], 68, tl.int64)
    tmp22 = tmp0 < tmp21
    tmp23 = tmp20 & tmp22  # channels 36-67: conv2 branch
    tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-36 + x1) + 512 * x2), tmp23 &
        xmask, other=0.0)
    tmp25 = tl.load(in_ptr4 + (-36 + x1), tmp23 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp26 = tmp24 + tmp25
    tmp27 = tmp26 > tmp13
    tmp28 = tmp26 * tmp15
    tmp29 = tl.where(tmp27, tmp26, tmp28)
    tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
    tmp31 = tl.where(tmp23, tmp29, tmp30)
    tmp32 = tmp0 >= tmp21  # channels 68-99: conv3 branch
    tl.full([1], 100, tl.int64)
    tmp35 = tl.load(in_ptr5 + (x0 + 16 * (-68 + x1) + 512 * x2), tmp32 &
        xmask, other=0.0)
    tmp36 = tl.load(in_ptr6 + (-68 + x1), tmp32 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp37 = tmp35 + tmp36
    tmp38 = tmp37 > tmp13
    tmp39 = tmp37 * tmp15
    tmp40 = tl.where(tmp38, tmp37, tmp39)
    tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
    tmp42 = tl.where(tmp32, tmp40, tmp41)
    tmp43 = tl.where(tmp23, tmp31, tmp42)
    tmp44 = tl.where(tmp9, tmp19, tmp43)
    tmp45 = tl.where(tmp4, tmp5, tmp44)
    tl.store(out_ptr0 + x3, tmp45, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Fused cat((x, lrelu(conv1..conv4)), dim=1) -> (4, 132, 4, 4).

    Channel ranges: 0-3 passthrough of x; 4-35 conv1; 36-67 conv2;
    68-99 conv3; 100-131 conv4 -- each branch is bias add + LeakyReLU(0.2)
    over a (raw conv output, bias) pointer pair.
    """
    xnumel = 8448
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 132  # output channel
    x0 = xindex % 16         # spatial position (4x4)
    x2 = xindex // 2112      # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 36, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # conv1 branch
    tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp9 &
        xmask, other=0.0)
    tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp10 + tmp11
    tmp13 = 0.0
    tmp14 = tmp12 > tmp13
    tmp15 = 0.2
    tmp16 = tmp12 * tmp15
    tmp17 = tl.where(tmp14, tmp12, tmp16)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp9, tmp17, tmp18)
    tmp20 = tmp0 >= tmp7
    tmp21 = tl.full([1], 68, tl.int64)
    tmp22 = tmp0 < tmp21
    tmp23 = tmp20 & tmp22  # conv2 branch
    tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-36 + x1) + 512 * x2), tmp23 &
        xmask, other=0.0)
    tmp25 = tl.load(in_ptr4 + (-36 + x1), tmp23 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp26 = tmp24 + tmp25
    tmp27 = tmp26 > tmp13
    tmp28 = tmp26 * tmp15
    tmp29 = tl.where(tmp27, tmp26, tmp28)
    tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
    tmp31 = tl.where(tmp23, tmp29, tmp30)
    tmp32 = tmp0 >= tmp21
    tmp33 = tl.full([1], 100, tl.int64)
    tmp34 = tmp0 < tmp33
    tmp35 = tmp32 & tmp34  # conv3 branch
    tmp36 = tl.load(in_ptr5 + (x0 + 16 * (-68 + x1) + 512 * x2), tmp35 &
        xmask, other=0.0)
    tmp37 = tl.load(in_ptr6 + (-68 + x1), tmp35 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp38 = tmp36 + tmp37
    tmp39 = tmp38 > tmp13
    tmp40 = tmp38 * tmp15
    tmp41 = tl.where(tmp39, tmp38, tmp40)
    tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
    tmp43 = tl.where(tmp35, tmp41, tmp42)
    tmp44 = tmp0 >= tmp33  # conv4 branch
    tl.full([1], 132, tl.int64)
    tmp47 = tl.load(in_ptr7 + (x0 + 16 * (-100 + x1) + 512 * x2), tmp44 &
        xmask, other=0.0)
    tmp48 = tl.load(in_ptr8 + (-100 + x1), tmp44 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp49 = tmp47 + tmp48
    tmp50 = tmp49 > tmp13
    tmp51 = tmp49 * tmp15
    tmp52 = tl.where(tmp50, tmp49, tmp51)
    tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
    tmp54 = tl.where(tmp44, tmp52, tmp53)
    tmp55 = tl.where(tmp35, tmp43, tmp54)
    tmp56 = tl.where(tmp23, tmp31, tmp55)
    tmp57 = tl.where(tmp9, tmp19, tmp56)
    tmp58 = tl.where(tmp4, tmp5, tmp57)
    tl.store(out_ptr0 + x3, tmp58, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    """In-place bias add for the final conv5 output (no activation)."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # output channel: one bias element per channel
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Compute the (activation > 0) mask saved for LeakyReLU backward.

    Recomputes bias add + LeakyReLU(0.2) from a raw conv output (in_ptr0)
    and its bias (in_ptr1), storing only the boolean positivity mask.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 32  # channel index for the bias lookup
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp8 = tmp7 > tmp3
    tl.store(out_ptr0 + x3, tmp8, None)
def call(args):
    """Inductor-compiled DenseBlock forward.

    args = (conv1.w, conv1.b, x, conv2.w, conv2.b, conv3.w, conv3.b,
    conv4.w, conv4.b, conv5.w, conv5.b) as packed by DenseBlockNew.forward.
    Convolutions run as extern kernels; bias + LeakyReLU + concat are fused
    into the triton cat kernels. Returns the conv5 output first, followed by
    tensors/masks saved for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (32, 36, 3, 3), (324, 9, 3, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (32, 68, 3, 3), (612, 9, 3, 1))
    assert_size_stride(primals_7, (32,), (1,))
    assert_size_stride(primals_8, (32, 100, 3, 3), (900, 9, 3, 1))
    assert_size_stride(primals_9, (32,), (1,))
    assert_size_stride(primals_10, (4, 132, 3, 3), (1188, 9, 3, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 (bias deferred into the fused cat kernel below)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 36, 4, 4), (576, 16, 4, 1), torch.float32
            )
        get_raw_stream(0)
        # buf1 = cat(x, lrelu(conv1))
        triton_poi_fused_cat_0[grid(2304)](primals_3, buf0, primals_2, buf1,
            2304, XBLOCK=256, num_warps=4, num_stages=1)
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
        buf3 = empty_strided_cuda((4, 68, 4, 4), (1088, 16, 4, 1), torch.
            float32)
        # buf3 = cat(x, lrelu(conv1), lrelu(conv2))
        triton_poi_fused_cat_1[grid(4352)](primals_3, buf0, primals_2, buf2,
            primals_5, buf3, 4352, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1))
        buf5 = empty_strided_cuda((4, 100, 4, 4), (1600, 16, 4, 1), torch.
            float32)
        # buf5 = cat(x, lrelu(conv1..conv3))
        triton_poi_fused_cat_2[grid(6400)](primals_3, buf0, primals_2, buf2,
            primals_5, buf4, primals_7, buf5, 6400, XBLOCK=256, num_warps=4,
            num_stages=1)
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 32, 4, 4), (512, 16, 4, 1))
        buf7 = empty_strided_cuda((4, 132, 4, 4), (2112, 16, 4, 1), torch.
            float32)
        # buf7 = cat(x, lrelu(conv1..conv4))
        triton_poi_fused_cat_3[grid(8448)](primals_3, buf0, primals_2, buf2,
            primals_5, buf4, primals_7, buf6, primals_9, buf7, 8448, XBLOCK
            =128, num_warps=4, num_stages=1)
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
        buf9 = buf8
        del buf8
        # conv5 bias added in place; conv5 has no activation.
        triton_poi_fused_convolution_4[grid(256)](buf9, primals_11, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
        # buf10..buf13: LeakyReLU positivity masks for the backward pass.
        buf10 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
            2048)](buf6, primals_9, buf10, 2048, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf6
        del primals_9
        buf11 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
            2048)](buf4, primals_7, buf11, 2048, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf4
        del primals_7
        buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
            2048)](buf2, primals_5, buf12, 2048, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf2
        del primals_5
        buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
            2048)](buf0, primals_2, buf13, 2048, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf0
        del primals_2
    return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13)
def initialize_weights(net_l, scale=1):
    """Kaiming-initialize conv/linear weights (scaled), zero biases;
    BatchNorm2d layers become identity (weight=1, bias=0)."""
    if not isinstance(net_l, list):
        net_l = [net_l]
    for net in net_l:
        for m in net.modules():
            if isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias.data, 0.0)
                continue
            if not isinstance(m, (nn.Conv2d, nn.Linear)):
                continue
            # Conv2d and Linear share the same recipe.
            init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            m.weight.data *= scale
            if m.bias is not None:
                m.bias.data.zero_()
def initialize_weights_xavier(net_l, scale=1):
    """Xavier-initialize conv/linear weights (scaled), zero biases;
    BatchNorm2d layers become identity (weight=1, bias=0)."""
    if not isinstance(net_l, list):
        net_l = [net_l]
    for net in net_l:
        for m in net.modules():
            if isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias.data, 0.0)
                continue
            if not isinstance(m, (nn.Conv2d, nn.Linear)):
                continue
            # Conv2d and Linear share the same recipe.
            init.xavier_normal_(m.weight)
            m.weight.data *= scale
            if m.bias is not None:
                m.bias.data.zero_()
class DenseBlockNew(nn.Module):
    """Triton-compiled variant of DenseBlock.

    Layer setup is identical to DenseBlock; forward hands all eleven
    parameters plus the input to the generated `call` wrapper. Kept
    byte-identical: `call` depends on this exact positional packing.
    """

    def __init__(self, channel_in, channel_out, init='xavier', gc=32, bias=True
        ):
        super(DenseBlockNew, self).__init__()
        self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(channel_in + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(channel_in + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(channel_in + 3 * gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(channel_in + 4 * gc, channel_out, 3, 1, 1,
            bias=bias)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        # First four convs: small (0.1x) init; conv5 starts at zero.
        if init == 'xavier':
            initialize_weights_xavier([self.conv1, self.conv2, self.conv3,
                self.conv4], 0.1)
        else:
            initialize_weights([self.conv1, self.conv2, self.conv3, self.
                conv4], 0.1)
        initialize_weights(self.conv5, 0)

    def forward(self, input_0):
        # Positional packing expected by the generated `call`.
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.conv5.weight
        primals_11 = self.conv5.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
|
yzxing87/Invertible-ISP
|
DenseBlock
| false
| 16,820
|
[
"MIT"
] | 246
|
344dd333dd2a075f6a9e4ffc445dc387ca3014c4
|
https://github.com/yzxing87/Invertible-ISP/tree/344dd333dd2a075f6a9e4ffc445dc387ca3014c4
|
LSTM
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
class LSTM(nn.Module):
    def __init__(self, input_size, cell_size, hidden_size):
        """
        cell_size is the size of cell_state.
        hidden_size is the size of hidden_state, or say the output_state of each step
        """
        super(LSTM, self).__init__()
        self.cell_size = cell_size
        self.hidden_size = hidden_size
        # One linear layer per gate, each over [input ; hidden].
        self.fl = nn.Linear(input_size + hidden_size, hidden_size)
        self.il = nn.Linear(input_size + hidden_size, hidden_size)
        self.ol = nn.Linear(input_size + hidden_size, hidden_size)
        self.Cl = nn.Linear(input_size + hidden_size, hidden_size)

    def forward(self, input, Hidden_State, Cell_State):
        """One LSTM step; returns the new (hidden, cell) states."""
        gate_in = torch.cat((input, Hidden_State), 1)
        forget_gate = F.sigmoid(self.fl(gate_in))
        input_gate = F.sigmoid(self.il(gate_in))
        output_gate = F.sigmoid(self.ol(gate_in))
        candidate = F.tanh(self.Cl(gate_in))
        Cell_State = forget_gate * Cell_State + input_gate * candidate
        Hidden_State = output_gate * F.tanh(Cell_State)
        return Hidden_State, Cell_State

    def loop(self, inputs):
        """Run the cell over a (batch, time, feature) sequence."""
        Hidden_State, Cell_State = self.initHidden(inputs.size(0))
        for step in range(inputs.size(1)):
            frame = torch.squeeze(inputs[:, step:step + 1, :])
            Hidden_State, Cell_State = self.forward(frame, Hidden_State,
                Cell_State)
        return Hidden_State, Cell_State

    def initHidden(self, batch_size):
        """Return zero-filled (hidden, cell) states."""
        # The original branched on torch.cuda.is_available() with two
        # identical bodies; the probe is kept for behavioral parity.
        torch.cuda.is_available()
        Hidden_State = Variable(torch.zeros(batch_size, self.hidden_size))
        Cell_State = Variable(torch.zeros(batch_size, self.hidden_size))
        return Hidden_State, Cell_State
def get_inputs():
    """(input, hidden, cell) triple for LSTM.forward."""
    return [torch.rand([4, 4]) for _ in range(3)]
def get_init_inputs():
    """Constructor (args, kwargs) used by test harnesses to build LSTM."""
    return [[], dict(input_size=4, cell_size=4, hidden_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Fused torch.cat((input, Hidden_State), 1) -> (4, 8).

    Columns 0-3 come from in_ptr0 (the step input), 4-7 from in_ptr1
    (the previous hidden state).
    """
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8   # output column
    x1 = xindex // 8  # batch row
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # columns 0-3: input features
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3  # columns 4-7: hidden state
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Fused LSTM state update.

    out_ptr0 <- sigmoid(f) * cell_prev + sigmoid(i) * tanh(C)
    out_ptr1 <- sigmoid(o) * tanh(out_ptr0)
    where in_ptr0/in_ptr2/in_ptr3/in_ptr4 are the pre-activation f/i/C/o
    gate values and in_ptr1 is the previous cell state.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask)
    tmp6 = tl.load(in_ptr3 + x0, xmask)
    tmp10 = tl.load(in_ptr4 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp5 = tl.sigmoid(tmp4)
    tmp7 = libdevice.tanh(tmp6)
    tmp8 = tmp5 * tmp7
    tmp9 = tmp3 + tmp8      # new cell state
    tmp11 = tl.sigmoid(tmp10)
    tmp12 = libdevice.tanh(tmp9)
    tmp13 = tmp11 * tmp12   # new hidden state
    tl.store(out_ptr0 + x0, tmp9, xmask)
    tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
    """Inductor-compiled LSTM single step.

    args = (input, Hidden_State, fl.w, fl.b, il.w, il.b, ol.w, ol.b,
    Cl.w, Cl.b, Cell_State) as packed by LSTMNew.forward. Returns
    (new hidden, new cell, ...saved-for-backward tensors).
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 8), (8, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 8), (8, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 8), (8, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: fused cat((input, hidden), 1)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        # buf1..buf4: pre-activation f, i, o, C gate projections.
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
        del primals_3
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        del primals_6
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf0, reinterpret_tensor(primals_7,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        del primals_8
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_10, buf0, reinterpret_tensor(primals_9,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
        del primals_10
        del primals_9
        # buf5: new cell state; buf6: new hidden state (fused gate math).
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_tanh_1[grid(16)](buf1, primals_11,
            buf2, buf4, buf3, buf5, buf6, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
    return buf6, buf5, primals_11, buf0, buf1, buf2, buf3, buf4, buf5
class LSTMNew(nn.Module):
    """Inductor-compiled LSTM cell wrapper.

    `forward` runs a single step through the fused Triton implementation
    (`call`); `loop` unrolls it over the time dimension of a [B, T, C]
    input.
    """

    def __init__(self, input_size, cell_size, hidden_size):
        """
        cell_size is the size of cell_state.
        hidden_size is the size of hidden_state, or say the output_state of each step
        """
        super(LSTMNew, self).__init__()
        self.cell_size = cell_size
        self.hidden_size = hidden_size
        # All four gate projections act on [input, hidden] concatenated.
        self.fl = nn.Linear(input_size + hidden_size, hidden_size)  # forget gate
        self.il = nn.Linear(input_size + hidden_size, hidden_size)  # input gate
        self.ol = nn.Linear(input_size + hidden_size, hidden_size)  # output gate
        self.Cl = nn.Linear(input_size + hidden_size, hidden_size)  # candidate cell

    def loop(self, inputs):
        """Run the cell over every time step of `inputs` ([B, T, C])."""
        batch_size = inputs.size(0)
        time_step = inputs.size(1)
        Hidden_State, Cell_State = self.initHidden(batch_size)
        for i in range(time_step):
            Hidden_State, Cell_State = self.forward(torch.squeeze(inputs[:,
                i:i + 1, :]), Hidden_State, Cell_State)
        return Hidden_State, Cell_State

    def initHidden(self, batch_size):
        """Return zero-initialized (hidden, cell) state tensors.

        The original branched on ``torch.cuda.is_available()`` but both
        branches were byte-identical (neither moved the tensors to the
        GPU), so the dead check is removed.
        NOTE(review): if GPU placement was intended, the CUDA branch
        should have called ``.cuda()`` on both states — confirm against
        callers before adding it.
        """
        Hidden_State = Variable(torch.zeros(batch_size, self.hidden_size))
        Cell_State = Variable(torch.zeros(batch_size, self.hidden_size))
        return Hidden_State, Cell_State

    def forward(self, input_0, input_1, input_2):
        """One LSTM step via the compiled kernel.

        Args:
            input_0: current input, [B, input_size].
            input_1: previous hidden state, [B, hidden_size].
            input_2: previous cell state, [B, hidden_size].

        Returns:
            (hidden_state, cell_state) for this step.
        """
        primals_3 = self.fl.weight
        primals_4 = self.fl.bias
        primals_5 = self.il.weight
        primals_6 = self.il.bias
        primals_7 = self.ol.weight
        primals_8 = self.ol.bias
        primals_9 = self.Cl.weight
        primals_10 = self.Cl.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_11 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0], output[1]
|
zhiyongc/Graph_Convolutional_LSTM
|
LSTM
| false
| 16,821
|
[
"MIT"
] | 281
|
a703b63e626b1e2563fe3f45d9714e468b1d4a0e
|
https://github.com/zhiyongc/Graph_Convolutional_LSTM/tree/a703b63e626b1e2563fe3f45d9714e468b1d4a0e
|
BG_loss
|
import torch
import torch.nn as nn
import torch.utils.data.distributed
class BG_loss(nn.Module):
    """Background L1 loss.

    Compares real and fake images after weighting both by the per-image
    masks, i.e. ``L1(masks * real, masks * fake)``.
    """

    def __init__(self):
        super(BG_loss, self).__init__()
        self.loss = nn.L1Loss()

    def forward(self, real_imgs, fake_imgs, masks):
        """Return the mean L1 distance between mask-weighted images.

        Args:
            real_imgs: real images, [B, ...].
            fake_imgs: generated images, same shape as ``real_imgs``.
            masks: weighting tensor, same shape as the images.

        Returns:
            Scalar tensor with the mean absolute difference.
        """
        # Vectorized form of the original per-index loop: multiplying the
        # whole batch at once yields the same masked tensors while
        # avoiding two full clone()s and B Python-level indexing ops.
        return self.loss(masks * real_imgs, masks * fake_imgs)
def get_inputs():
    """Random (real, fake, mask) tensors for smoke-testing BG_loss."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(3)]
def get_init_inputs():
    """Constructor arguments for BG_loss: no positionals, no keywords."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Fused reduction kernel generated by torch-inductor for BG_loss:
# reconstructs the per-index mask multiplication (the tl.where chains
# select per-batch-slice products by comparing r1 against 0..3),
# then reduces mean(|masked_real - masked_fake|) over all 256 elements
# into a single scalar written to in_out_ptr0.
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, rnumel):
    # Single program instance reduces the whole tensor (RBLOCK == numel).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    # r1: batch index (0..3), r0: offset within one 64-element image.
    r1 = rindex // 64
    r0 = rindex % 64
    r2 = rindex
    tmp3 = tl.load(in_ptr0 + (64 + r0), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (64 + r0), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + r2, None)
    tmp14 = tl.load(in_ptr2 + (64 + r0), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + r0, None, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr2 + r2, None)
    tmp23 = tl.load(in_ptr0 + (192 + r0), None, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr1 + (192 + r0), None, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (128 + r0), None, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (128 + r0), None, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr2 + (192 + r0), None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr2 + (128 + r0), None, eviction_policy='evict_last')
    tmp0 = r1
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tl.full([1], 0, tl.int32)
    tmp7 = tmp0 == tmp6
    tmp10 = tmp8 * tmp9
    tmp12 = tl.where(tmp7, tmp10, tmp11)
    tmp13 = tl.where(tmp2, tmp5, tmp12)
    tmp15 = tmp3 * tmp14
    tmp17 = tmp8 * tmp16
    tmp19 = tl.where(tmp7, tmp17, tmp18)
    tmp20 = tl.where(tmp2, tmp15, tmp19)
    tmp21 = tl.full([1], 3, tl.int32)
    tmp22 = tmp0 == tmp21
    tmp25 = tmp23 * tmp24
    tmp26 = tl.full([1], 2, tl.int32)
    tmp27 = tmp0 == tmp26
    tmp30 = tmp28 * tmp29
    tmp31 = tl.where(tmp27, tmp30, tmp13)
    tmp32 = tl.where(tmp22, tmp25, tmp31)
    tmp34 = tmp23 * tmp33
    tmp36 = tmp28 * tmp35
    tmp37 = tl.where(tmp27, tmp36, tmp20)
    tmp38 = tl.where(tmp22, tmp34, tmp37)
    # |masked_real - masked_fake|, summed then divided by element count.
    tmp39 = tmp32 - tmp38
    tmp40 = tl_math.abs(tmp39)
    tmp41 = tl.broadcast_to(tmp40, [RBLOCK])
    tmp43 = triton_helpers.promote_to_tensor(tl.sum(tmp41, 0))
    tmp44 = 256.0
    tmp45 = tmp43 / tmp44
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp45, None)
def call(args):
    """Inductor entry point for BG_loss: validates the three [4,4,4,4]
    inputs, allocates a scalar output buffer and launches the fused
    abs/mean/mul/sub reduction kernel on device 0."""
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Scalar accumulator; the kernel writes the final mean into it.
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2
        del buf2
        get_raw_stream(0)
        triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf3, arg2_1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf3,
class BG_lossNew(nn.Module):
    """Triton-accelerated BG_loss; delegates the computation to `call`."""

    def __init__(self):
        super(BG_lossNew, self).__init__()
        self.loss = nn.L1Loss()

    def forward(self, input_0, input_1, input_2):
        # Forward the three tensors to the compiled kernel and unwrap
        # the single scalar result.
        result, = call([input_0, input_1, input_2])
        return result
|
ziqi-jin/OpenUnReID
|
BG_loss
| false
| 16,822
|
[
"Apache-2.0"
] | 344
|
50eb516945c418398cac890029d1b366c27c0185
|
https://github.com/ziqi-jin/OpenUnReID/tree/50eb516945c418398cac890029d1b366c27c0185
|
SmoothSoftmax
|
import torch
from torch import Tensor
from torch import nn
class SmoothSoftmax(nn.Module):
    """Sigmoid-normalized activation: element-wise sigmoid rescaled so
    the last dimension sums to one."""

    def forward(self, x: 'Tensor'):
        gates = torch.sigmoid(x)
        total = gates.sum(dim=-1, keepdim=True)
        return gates / total
def get_inputs():
    """Single random activation tensor for smoke tests."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """SmoothSoftmax takes no constructor arguments."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Pointwise kernel for SmoothSoftmax: for each element, divide its
# sigmoid by the sum of sigmoids along the innermost axis of length 4
# (the four neighbouring loads at 4*x1 .. 3+4*x1).
@triton.jit
def triton_poi_fused_div_sigmoid_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    # x1 indexes the length-4 row this element belongs to.
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tl.sigmoid(tmp2)
    tmp5 = tl.sigmoid(tmp4)
    tmp6 = tmp3 + tmp5
    tmp8 = tl.sigmoid(tmp7)
    tmp9 = tmp6 + tmp8
    tmp11 = tl.sigmoid(tmp10)
    tmp12 = tmp9 + tmp11
    # sigmoid(x) / sum(sigmoid(row))
    tmp13 = tmp1 / tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
    """Inductor entry point for SmoothSoftmax: one pointwise launch over
    all 256 elements of the [4,4,4,4] input."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sigmoid_sum_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class SmoothSoftmaxNew(nn.Module):
    """Triton-accelerated SmoothSoftmax; delegates to `call`."""

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
zsl24/voice-activity-detection
|
SmoothSoftmax
| false
| 16,823
|
[
"MIT"
] | 74
|
a034be23c6283121c6b72e778c6ff6711045cbe3
|
https://github.com/zsl24/voice-activity-detection/tree/a034be23c6283121c6b72e778c6ff6711045cbe3
|
Quaternion
|
import torch
import torch.nn as nn
import torch.utils.data
class Quaternion(nn.Module):
    """Convert batched quaternion vectors (components along dim 1) into
    flattened 3x3 rotation matrices."""

    def __init__(self):
        super(Quaternion, self).__init__()

    def forward(self, rvec):
        # Normalize each 4-vector; the 1e-05 keeps the norm away from 0.
        theta = torch.sqrt(1e-05 + torch.sum(rvec ** 2, dim=1))
        q = rvec / theta[:, None]
        x, y, z, w = q[:, 0], q[:, 1], q[:, 2], q[:, 3]
        # Rotation-matrix entries written with named components instead
        # of one monolithic expression; order matches row-major layout.
        entries = (
            1.0 - 2.0 * y ** 2 - 2.0 * z ** 2,
            2.0 * (x * y - z * w),
            2.0 * (x * z + y * w),
            2.0 * (x * y + z * w),
            1.0 - 2.0 * x ** 2 - 2.0 * z ** 2,
            2.0 * (y * z - x * w),
            2.0 * (x * z - y * w),
            2.0 * (x * w + y * z),
            1.0 - 2.0 * x ** 2 - 2.0 * y ** 2,
        )
        return torch.stack(entries, dim=1).view(-1, 3, 3)
def get_inputs():
    """Single random [4,4,4,4] tensor for smoke tests."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Quaternion takes no constructor arguments."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Pointwise kernel: divide every element by the norm of its 4-vector
# along dim 1, i.e. rvec / sqrt(1e-05 + sum(rvec**2, dim=1)).
# The four strided loads (offset 0/16/32/48) gather the components of
# the vector this element belongs to.
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    # x0: position inside one 16-element slice; x2: batch index.
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 1e-05
    tmp13 = tmp11 + tmp12
    tmp14 = libdevice.sqrt(tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)
# Computes the nine rotation-matrix entries of the Quaternion module
# from the normalized quaternion components (tmp10=x, tmp0=y, tmp6=z,
# tmp12=w) and scatters each entry into its slot of the shared stacked
# output buffer (stride 144 per batch).
@triton.jit
def triton_poi_fused_add_mul_pow_rsub_sub_1(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    # Component loads: y, z, x, w of the normalized quaternion.
    tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp10 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp1 = tmp0 * tmp0
    tmp2 = 2.0
    tmp3 = tmp1 * tmp2
    tmp4 = 1.0
    tmp5 = tmp4 - tmp3
    tmp7 = tmp6 * tmp6
    tmp8 = tmp7 * tmp2
    tmp9 = tmp5 - tmp8
    tmp11 = tmp10 * tmp0
    tmp13 = tmp6 * tmp12
    tmp14 = tmp11 - tmp13
    tmp15 = tmp14 * tmp2
    tmp16 = tmp10 * tmp6
    tmp17 = tmp0 * tmp12
    tmp18 = tmp16 + tmp17
    tmp19 = tmp18 * tmp2
    tmp20 = tmp11 + tmp13
    tmp21 = tmp20 * tmp2
    tmp22 = tmp0 * tmp6
    tmp23 = tmp10 * tmp12
    tmp24 = tmp22 - tmp23
    tmp25 = tmp24 * tmp2
    tmp26 = tmp16 - tmp17
    tmp27 = tmp26 * tmp2
    tmp28 = tmp23 + tmp22
    tmp29 = tmp28 * tmp2
    tmp30 = tmp10 * tmp10
    tmp31 = tmp30 * tmp2
    tmp32 = tmp4 - tmp31
    tmp33 = tmp32 - tmp8
    tmp34 = tmp32 - tmp3
    # Each out_ptr is a 16-element slice of the stacked (4, 36, 4) buffer.
    tl.store(out_ptr0 + (x0 + 144 * x1), tmp9, xmask)
    tl.store(out_ptr1 + (x0 + 144 * x1), tmp15, xmask)
    tl.store(out_ptr2 + (x0 + 144 * x1), tmp19, xmask)
    tl.store(out_ptr3 + (x0 + 144 * x1), tmp21, xmask)
    tl.store(out_ptr4 + (x0 + 144 * x1), tmp25, xmask)
    tl.store(out_ptr5 + (x0 + 144 * x1), tmp27, xmask)
    tl.store(out_ptr6 + (x0 + 144 * x1), tmp29, xmask)
    tl.store(out_ptr7 + (x0 + 144 * x1), tmp33, xmask)
    tl.store(out_ptr8 + (x0 + 144 * x1), tmp34, xmask)
def call(args):
    """Inductor entry point for Quaternion: normalize the input, then
    write the nine rotation-matrix entries into views of one stacked
    buffer that is finally reinterpreted as [64, 3, 3]."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
        # buf1..buf9 are non-overlapping 16-element slices of buf10;
        # the second kernel writes the matrix entries in place.
        buf10 = empty_strided_cuda((4, 36, 4), (144, 4, 1), torch.float32)
        buf1 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 0)
        buf2 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 16)
        buf3 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 32)
        buf4 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 48)
        buf6 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 80)
        buf7 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 96)
        buf8 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 112)
        buf5 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 64)
        buf9 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 128)
        triton_poi_fused_add_mul_pow_rsub_sub_1[grid(64)](buf0, buf1, buf2,
            buf3, buf4, buf6, buf7, buf8, buf5, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf0
    return reinterpret_tensor(buf10, (64, 3, 3), (9, 3, 1), 0),
class QuaternionNew(nn.Module):
    """Triton-accelerated Quaternion; delegates to `call`."""

    def __init__(self):
        super(QuaternionNew, self).__init__()

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
zhuhao-nju/mofanerf
|
Quaternion
| false
| 16,824
|
[
"MIT"
] | 55
|
0206526e25aab3dd8f0cc789f290c7559642676b
|
https://github.com/zhuhao-nju/mofanerf/tree/0206526e25aab3dd8f0cc789f290c7559642676b
|
Rodrigues
|
import torch
import torch.nn as nn
import torch.utils.data
class Rodrigues(nn.Module):
    """Rodrigues' rotation formula: convert batched axis-angle vectors
    (components along dim 1) into flattened 3x3 rotation matrices."""

    def __init__(self):
        super(Rodrigues, self).__init__()

    def forward(self, rvec):
        # Angle is the vector norm (1e-05 avoids division by zero);
        # the axis is the normalized vector.
        theta = torch.sqrt(1e-05 + torch.sum(rvec ** 2, dim=1))
        axis = rvec / theta[:, None]
        x, y, z = axis[:, 0], axis[:, 1], axis[:, 2]
        c = torch.cos(theta)
        s = torch.sin(theta)
        one_minus_c = 1.0 - c
        # Row-major entries of R = cos(t)I + sin(t)[k]x + (1-cos(t))kk^T.
        entries = (
            x ** 2 + (1.0 - x ** 2) * c,
            x * y * one_minus_c - z * s,
            x * z * one_minus_c + y * s,
            x * y * one_minus_c + z * s,
            y ** 2 + (1.0 - y ** 2) * c,
            y * z * one_minus_c - x * s,
            x * z * one_minus_c - y * s,
            y * z * one_minus_c + x * s,
            z ** 2 + (1.0 - z ** 2) * c,
        )
        return torch.stack(entries, dim=1).view(-1, 3, 3)
def get_inputs():
    """Single random [4,4,4,4] tensor for smoke tests."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Rodrigues takes no constructor arguments."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Pointwise normalization for Rodrigues (identical math to the
# Quaternion variant): divide each element by the norm of its 4-vector
# along dim 1, sqrt(1e-05 + sum of squared components).
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 1e-05
    tmp13 = tmp11 + tmp12
    tmp14 = libdevice.sqrt(tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)
# Computes the nine Rodrigues rotation-matrix entries. in_ptr0 holds the
# normalized axis components (tmp0=x, tmp21=y, tmp25=z); in_ptr1 is the
# raw input, from which theta is recomputed so cos/sin use the un-clamped
# angle. Results are scattered into slices of the stacked output buffer.
@triton.jit
def triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
    out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
    tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
    tmp9 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
    tmp21 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp25 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp1 = tmp0 * tmp0
    tmp2 = 1.0
    tmp3 = tmp2 - tmp1
    # Recompute theta = sqrt(1e-05 + sum of squares) from the raw input.
    tmp5 = tmp4 * tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 * tmp12
    tmp14 = tmp11 + tmp13
    tmp15 = 1e-05
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.sqrt(tmp16)
    tmp18 = tl_math.cos(tmp17)
    tmp19 = tmp3 * tmp18
    tmp20 = tmp1 + tmp19
    tmp22 = tmp0 * tmp21
    tmp23 = tmp2 - tmp18
    tmp24 = tmp22 * tmp23
    tmp26 = tl_math.sin(tmp17)
    tmp27 = tmp25 * tmp26
    tmp28 = tmp24 - tmp27
    tmp29 = tmp0 * tmp25
    tmp30 = tmp29 * tmp23
    tmp31 = tmp21 * tmp26
    tmp32 = tmp30 + tmp31
    tmp33 = tmp24 + tmp27
    tmp34 = tmp21 * tmp25
    tmp35 = tmp34 * tmp23
    tmp36 = tmp0 * tmp26
    tmp37 = tmp35 - tmp36
    tmp38 = tmp30 - tmp31
    tmp39 = tmp35 + tmp36
    tmp40 = tmp21 * tmp21
    tmp41 = tmp2 - tmp40
    tmp42 = tmp41 * tmp18
    tmp43 = tmp40 + tmp42
    tmp44 = tmp25 * tmp25
    tmp45 = tmp2 - tmp44
    tmp46 = tmp45 * tmp18
    tmp47 = tmp44 + tmp46
    # Each out_ptr is a 16-element slice of the stacked (4, 36, 4) buffer.
    tl.store(out_ptr0 + (x0 + 144 * x1), tmp20, xmask)
    tl.store(out_ptr1 + (x0 + 144 * x1), tmp28, xmask)
    tl.store(out_ptr2 + (x0 + 144 * x1), tmp32, xmask)
    tl.store(out_ptr3 + (x0 + 144 * x1), tmp33, xmask)
    tl.store(out_ptr4 + (x0 + 144 * x1), tmp37, xmask)
    tl.store(out_ptr5 + (x0 + 144 * x1), tmp38, xmask)
    tl.store(out_ptr6 + (x0 + 144 * x1), tmp39, xmask)
    tl.store(out_ptr7 + (x0 + 144 * x1), tmp43, xmask)
    tl.store(out_ptr8 + (x0 + 144 * x1), tmp47, xmask)
def call(args):
    """Inductor entry point for Rodrigues: normalize the input, compute
    the nine matrix entries into views of one stacked buffer, and return
    it reinterpreted as [64, 3, 3]."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        # buf1..buf9 alias non-overlapping slices of buf10.
        buf10 = empty_strided_cuda((4, 36, 4), (144, 4, 1), torch.float32)
        buf1 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 0)
        buf2 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 16)
        buf3 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 32)
        buf4 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 48)
        buf6 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 80)
        buf7 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 96)
        buf8 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 112)
        buf5 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 64)
        buf9 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 128)
        triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1[grid(64)](buf0
            , arg0_1, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf5, buf9,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del buf0
    return reinterpret_tensor(buf10, (64, 3, 3), (9, 3, 1), 0),
class RodriguesNew(nn.Module):
    """Triton-accelerated Rodrigues; delegates to `call`."""

    def __init__(self):
        super(RodriguesNew, self).__init__()

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
zhuhao-nju/mofanerf
|
Rodrigues
| false
| 16,825
|
[
"MIT"
] | 55
|
0206526e25aab3dd8f0cc789f290c7559642676b
|
https://github.com/zhuhao-nju/mofanerf/tree/0206526e25aab3dd8f0cc789f290c7559642676b
|
ChebConv
|
import torch
import torch.nn as nn
from torch.nn import init
class ChebConv(nn.Module):
    """
    The ChebNet convolution operation.
    :param in_c: int, number of input channels.
    :param out_c: int, number of output channels.
    :param K: int, the order of Chebyshev Polynomial.
    """

    def __init__(self, in_c, out_c, K, bias=True, normalize=True):
        super(ChebConv, self).__init__()
        self.normalize = normalize
        # One [in_c, out_c] weight matrix per Chebyshev order; the
        # singleton dim broadcasts over the batch in forward().
        self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))
        init.xavier_normal_(self.weight)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
            init.zeros_(self.bias)
        else:
            self.register_parameter('bias', None)
        # Internally self.K counts polynomial terms (order + 1).
        self.K = K + 1

    def forward(self, inputs, graph):
        """
        :param inputs: the input data, [B, N, C]
        :param graph: the graph structure, [N, N]
        :return: convolution result, [B, N, D]
        """
        L = ChebConv.get_laplacian(graph, self.normalize)
        # mul_L: [K, 1, N, N]; matmul broadcasts it over the batch.
        mul_L = self.cheb_polynomial(L).unsqueeze(1)
        result = torch.matmul(mul_L, inputs)
        result = torch.matmul(result, self.weight)
        # Sum contributions of all K orders, then add the bias.
        result = torch.sum(result, dim=0) + self.bias
        return result

    def cheb_polynomial(self, laplacian):
        """
        Compute the Chebyshev Polynomial, according to the graph laplacian.
        :param laplacian: the graph laplacian, [N, N].
        :return: the multi order Chebyshev laplacian, [K, N, N].
        """
        N = laplacian.size(0)
        # Filled in place via the recurrence T_k = 2 L T_{k-1} - T_{k-2};
        # the write order (T_0, T_1, then k ascending) matters.
        multi_order_laplacian = torch.zeros([self.K, N, N], device=
            laplacian.device, dtype=torch.float)
        multi_order_laplacian[0] = torch.eye(N, device=laplacian.device,
            dtype=torch.float)
        if self.K == 1:
            return multi_order_laplacian
        else:
            multi_order_laplacian[1] = laplacian
            if self.K == 2:
                return multi_order_laplacian
            else:
                for k in range(2, self.K):
                    multi_order_laplacian[k] = 2 * torch.mm(laplacian,
                        multi_order_laplacian[k - 1]) - multi_order_laplacian[
                        k - 2]
        return multi_order_laplacian

    @staticmethod
    def get_laplacian(graph, normalize):
        """
        return the laplacian of the graph.
        :param graph: the graph structure without self loop, [N, N].
        :param normalize: whether to used the normalized laplacian.
        :return: graph laplacian.
        """
        if normalize:
            # L = I - D^{-1/2} A D^{-1/2}. NOTE(review): a node with zero
            # degree makes D^{-1/2} infinite — assumes connected input.
            D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
            L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype
                ) - torch.mm(torch.mm(D, graph), D)
        else:
            # Combinatorial laplacian L = D - A.
            D = torch.diag(torch.sum(graph, dim=-1))
            L = D - graph
        return L
def get_inputs():
    """Random [4,4] feature and adjacency matrices for smoke tests."""
    return [torch.rand(4, 4) for _ in range(2)]
def get_init_inputs():
    """ChebConv(in_c=4, out_c=4, K=4) with no positional arguments."""
    kwargs = dict(in_c=4, out_c=4, K=4)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Builds D^{-1/2} for ChebConv's normalized laplacian: row sums of the
# adjacency raised to -0.5 on the diagonal, zero elsewhere.
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # Diagonal predicate: row index equals column index.
    tmp0 = x0
    tmp1 = x1
    tmp2 = tmp0 == tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp9 = tmp7 + tmp8
    tmp10 = -0.5
    tmp11 = libdevice.pow(tmp9, tmp10)
    tmp12 = 0.0
    tmp13 = tl.where(tmp2, tmp11, tmp12)
    tl.store(out_ptr0 + x2, tmp13, xmask)
# In-place L = I - (D^{-1/2} A D^{-1/2}): subtract the buffer holding
# the normalized product from the identity matrix.
@triton.jit
def triton_poi_fused_diag_embed_eye_sub_1(in_out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp6 = tl.load(in_out_ptr0 + x2, xmask)
    tmp0 = x1
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    # Identity entry: 1 on the diagonal, 0 off it.
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp7 = tmp5 - tmp6
    tl.store(in_out_ptr0 + x2, tmp7, xmask)
# Initializes the first two planes of the [5, N, N] Chebyshev stack:
# plane 0 = identity (T_0), plane 1 = laplacian (T_1), remaining planes
# zero. x2 selects the plane, x3 the position inside one N*N matrix.
@triton.jit
def triton_poi_fused_diag_embed_eye_zeros_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16
    x3 = xindex % 16
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x4 = xindex
    tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp0 = x2
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = tmp0 == tmp4
    tmp6 = x1
    tmp7 = x0
    tmp8 = tmp6 == tmp7
    tmp9 = 1.0
    tmp10 = 0.0
    tmp11 = tl.where(tmp8, tmp9, tmp10)
    tmp12 = tl.where(tmp5, tmp11, tmp10)
    tmp13 = tl.where(tmp2, tmp3, tmp12)
    tl.store(out_ptr0 + x4, tmp13, xmask)
# Chebyshev recurrence step for plane 2: writes T_2 = 2*(L @ T_1) - T_0
# into its plane of the stack, copying all other planes through.
# in_ptr0 holds the precomputed L @ T_1 product; in_ptr1 the stack.
@triton.jit
def triton_poi_fused_mul_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x0 = xindex % 16
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + x2, xmask)
    tmp0 = x1
    tmp1 = tl.full([1], 2, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = 2.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = tl.where(tmp2, tmp7, tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
# Chebyshev recurrence step for plane 3: T_3 = 2*(L @ T_2) - T_1
# (T_1 read at offset 16 of the stack); other planes copied through.
@triton.jit
def triton_poi_fused_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x0 = xindex % 16
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + x2, xmask)
    tmp0 = x1
    tmp1 = tl.full([1], 3, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = 2.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = tl.where(tmp2, tmp7, tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
# Chebyshev recurrence step for plane 4: T_4 = 2*(L @ T_3) - T_2
# (T_2 read at offset 32 of the stack); other planes copied through.
@triton.jit
def triton_poi_fused_mul_sub_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x0 = xindex % 16
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + x2, xmask)
    tmp0 = x1
    tmp1 = tl.full([1], 4, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = 2.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = tl.where(tmp2, tmp7, tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
# Final reduction for ChebConv: sums the five per-order results (planes
# at offsets 0, 16, ..., 64) and adds the broadcast bias (in_ptr1).
@triton.jit
def triton_poi_fused_add_sum_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x2), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x2), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
    tmp7 = tl.load(in_ptr0 + (64 + x2), xmask)
    tmp9 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
    """Inductor entry point for ChebConv.

    Pipeline: build D^{-1/2}, form the normalized laplacian, unroll the
    Chebyshev recurrence (planes 2..4 each need an mm + an update
    kernel), apply the per-order weights, and sum with the bias.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (5, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_4, (1, 1, 4), (4, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = D^{-1/2} (diagonal matrix from row sums of the graph).
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_diag_embed_0[grid(16)](primals_1, buf0, 16, XBLOCK
            =16, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, primals_1, out=buf1)
        del primals_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf1, buf0, out=buf2)
        del buf0
        buf3 = buf2
        del buf2
        # buf3 = I - D^{-1/2} A D^{-1/2} (the normalized laplacian).
        triton_poi_fused_diag_embed_eye_sub_1[grid(16)](buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_diag_embed_eye_zeros_2[grid(80)](buf3, buf4, 80,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf5 = buf1
        del buf1
        # Each recurrence step: mm(L, T_{k-1}) then a pointwise update.
        extern_kernels.mm(buf3, reinterpret_tensor(buf4, (4, 4), (4, 1), 16
            ), out=buf5)
        buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_mul_sub_3[grid(80)](buf5, buf4, buf6, 80, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf7 = buf5
        del buf5
        extern_kernels.mm(buf3, reinterpret_tensor(buf6, (4, 4), (4, 1), 32
            ), out=buf7)
        buf8 = buf4
        del buf4
        triton_poi_fused_mul_sub_4[grid(80)](buf7, buf6, buf8, 80, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf9 = buf7
        del buf7
        extern_kernels.mm(buf3, reinterpret_tensor(buf8, (4, 4), (4, 1), 48
            ), out=buf9)
        del buf3
        buf10 = buf6
        del buf6
        triton_poi_fused_mul_sub_5[grid(80)](buf9, buf8, buf10, 80, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf11 = reinterpret_tensor(buf8, (20, 4), (4, 1), 0)
        del buf8
        # Apply the stacked laplacians to the inputs, then the weights.
        extern_kernels.mm(reinterpret_tensor(buf10, (20, 4), (4, 1), 0),
            primals_2, out=buf11)
        del primals_2
        buf12 = buf10
        del buf10
        extern_kernels.bmm(reinterpret_tensor(buf11, (5, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(primals_3, (5, 4, 4), (16, 4, 1), 0),
            out=buf12)
        del primals_3
        buf13 = reinterpret_tensor(buf9, (1, 4, 4), (16, 4, 1), 0)
        del buf9
        triton_poi_fused_add_sum_6[grid(16)](buf12, primals_4, buf13, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf12
        del primals_4
    return buf13, reinterpret_tensor(buf11, (5, 4, 4), (16, 1, 4), 0)
class ChebConvNew(nn.Module):
    """
    The ChebNet convolution operation.
    :param in_c: int, number of input channels.
    :param out_c: int, number of output channels.
    :param K: int, the order of Chebyshev Polynomial.

    Triton-accelerated variant: forward() delegates to the compiled
    `call`; cheb_polynomial/get_laplacian are kept for API parity with
    ChebConv but are not used by forward().
    """

    def __init__(self, in_c, out_c, K, bias=True, normalize=True):
        super(ChebConvNew, self).__init__()
        self.normalize = normalize
        # One [in_c, out_c] weight matrix per Chebyshev order.
        self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))
        init.xavier_normal_(self.weight)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
            init.zeros_(self.bias)
        else:
            self.register_parameter('bias', None)
        # Internally self.K counts polynomial terms (order + 1).
        self.K = K + 1

    def cheb_polynomial(self, laplacian):
        """
        Compute the Chebyshev Polynomial, according to the graph laplacian.
        :param laplacian: the graph laplacian, [N, N].
        :return: the multi order Chebyshev laplacian, [K, N, N].
        """
        N = laplacian.size(0)
        multi_order_laplacian = torch.zeros([self.K, N, N], device=
            laplacian.device, dtype=torch.float)
        multi_order_laplacian[0] = torch.eye(N, device=laplacian.device,
            dtype=torch.float)
        if self.K == 1:
            return multi_order_laplacian
        else:
            multi_order_laplacian[1] = laplacian
            if self.K == 2:
                return multi_order_laplacian
            else:
                # Recurrence T_k = 2 L T_{k-1} - T_{k-2}.
                for k in range(2, self.K):
                    multi_order_laplacian[k] = 2 * torch.mm(laplacian,
                        multi_order_laplacian[k - 1]) - multi_order_laplacian[
                        k - 2]
        return multi_order_laplacian

    @staticmethod
    def get_laplacian(graph, normalize):
        """
        return the laplacian of the graph.
        :param graph: the graph structure without self loop, [N, N].
        :param normalize: whether to used the normalized laplacian.
        :return: graph laplacian.
        """
        if normalize:
            D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
            L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype
                ) - torch.mm(torch.mm(D, graph), D)
        else:
            D = torch.diag(torch.sum(graph, dim=-1))
            L = D - graph
        return L

    def forward(self, input_0, input_1):
        """Run the compiled ChebConv kernel.

        :param input_0: graph adjacency, [N, N].
        :param input_1: node features, [N, C].
        :return: convolution result.
        """
        primals_3 = self.weight
        primals_4 = self.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
|
zhaoweixi/GraFormer
|
ChebConv
| false
| 16,826
|
[
"BSD-2-Clause"
] | 384
|
0a0a04014cdf157c11ab8e952862efa27c6a1980
|
https://github.com/zhaoweixi/GraFormer/tree/0a0a04014cdf157c11ab8e952862efa27c6a1980
|
Attention
|
import torch
from torch import Tensor
from torch import nn
class Attention(nn.Module):
    """Scales each feature vector of the input by a per-position weight."""

    def forward(self, selected_input: 'Tensor', attention: 'Tensor'):
        # unsqueeze(-1) broadcasts the attention weights over the feature dim.
        weights = attention.unsqueeze(-1)
        return selected_input * weights
def get_inputs():
    """Sample forward() inputs for the benchmark harness."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Broadcasted elementwise product realizing
    # `selected_input * attention.unsqueeze(-1)` as one flat pass over the
    # 1024-element (4,4,4,4,4) broadcast result.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x3 tiles the 256-element operand along the outermost broadcast axis;
    # x4 repeats each element of the other operand 4x (trailing-dim broadcast).
    x3 = xindex % 256
    x4 = xindex // 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x5, tmp2, xmask)
def call(args):
    """Inductor entry point: validate the two (4,4,4,4) inputs, allocate the
    (4,4,4,4,4) broadcast output on CUDA device 0, and launch the fused
    multiply kernel. Returns a 1-tuple with the output buffer."""
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(1024)](arg1_1, arg0_1, buf0, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,
class AttentionNew(nn.Module):
    """Triton-compiled drop-in for Attention; delegates to the fused kernel."""

    def forward(self, input_0, input_1):
        outputs = call([input_0, input_1])
        return outputs[0]
|
zsl24/voice-activity-detection
|
Attention
| false
| 16,827
|
[
"MIT"
] | 74
|
a034be23c6283121c6b72e778c6ff6711045cbe3
|
https://github.com/zsl24/voice-activity-detection/tree/a034be23c6283121c6b72e778c6ff6711045cbe3
|
RerangeLayer
|
import torch
import torch.utils.data
import torch.nn as nn
class RerangeLayer(nn.Module):
    """Affinely maps activations from [-1, 1] into [0, 1]."""

    def __init__(self):
        super(RerangeLayer, self).__init__()

    def forward(self, inp):
        # (x + 1) / 2: shift up by one, then halve.
        shifted = inp + 1.0
        return shifted / 2.0
def get_inputs():
    """Sample forward() input for the benchmark harness."""
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise (x + 1.0) * 0.5 over a flat 256-element tensor — the
    # division by 2 from RerangeLayer strength-reduced to a multiply.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
    """Inductor entry point: validate the (4,4,4,4) input, allocate a
    same-shape output on CUDA device 0, and launch the fused add/div kernel.
    Returns a 1-tuple with the output buffer."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class RerangeLayerNew(nn.Module):
    """Triton-compiled drop-in for RerangeLayer; delegates to the fused kernel."""

    def __init__(self):
        super(RerangeLayerNew, self).__init__()

    def forward(self, input_0):
        outputs = call([input_0])
        return outputs[0]
|
zvict/HyperRIM
|
RerangeLayer
| false
| 16,828
|
[
"Apache-2.0"
] | 92
|
f3800196b59ea0f94561efa88ec2e6675e4c8b00
|
https://github.com/zvict/HyperRIM/tree/f3800196b59ea0f94561efa88ec2e6675e4c8b00
|
FocalLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
    """Balanced focal loss on top of cross-entropy (Lin et al., 2017).

    loss = balance_param * (1 - pt) ** focusing_param * CE, with
    pt = exp(-CE) the model's probability for the target.
    """

    def __init__(self, focusing_param=2, balance_param=0.25):
        super(FocalLoss, self).__init__()
        self.focusing_param = focusing_param  # gamma: down-weights easy examples
        self.balance_param = balance_param  # alpha: class-balance factor

    def forward(self, output, target):
        """Return the scalar balanced focal loss for `output` vs `target`.

        Fix vs original: cross-entropy was computed twice and the result of
        a dead `torch.log(cross_entropy)` was discarded; both removed.
        """
        logpt = -F.cross_entropy(output, target)
        pt = torch.exp(logpt)
        # (1 - pt)^gamma modulates the (negative) log-likelihood.
        focal_loss = -(1 - pt) ** self.focusing_param * logpt
        return self.balance_param * focal_loss
def get_inputs():
    """Sample forward() inputs for the benchmark harness."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # log-softmax pass 1 over dim 1 of a (4,4,4,4) tensor: subtract the
    # per-position max of the 4 class slices (stride 16 apart) for stability.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    # x0 = position within a (4,4) spatial plane, x2 = batch index.
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    # Single-program reduction fusing: log-softmax pass 2 (log-sum-exp over
    # the 4 classes), weighting by the soft target, global sum, then the
    # focal-loss scalar formula. 0.015625 = 1/64 (the reduction mean factor).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r3 = rindex
    r0 = rindex % 16
    r2 = rindex // 64
    tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
        )
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp14 = tl.load(in_ptr1 + r3, None)
    # log-sum-exp of the max-shifted logits (pass 1 output).
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tmp15 = tmp13 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
    tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
    # Scalar focal loss: ce = -mean; logpt = -ce; pt = exp(logpt);
    # result = 0.25 * (-(1 - pt)^2 * logpt).
    tmp19 = -tmp18
    tmp20 = 0.015625
    tmp21 = tmp19 * tmp20
    tmp22 = -tmp21
    tmp23 = tl_math.exp(tmp22)
    tmp24 = 1.0
    tmp25 = tmp24 - tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = -tmp26
    tmp28 = tmp27 * tmp22
    tmp29 = 0.25
    tmp30 = tmp28 * tmp29
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
def call(args):
    """Inductor entry point for FocalLoss: run the two-pass fused
    log-softmax + focal reduction on CUDA device 0 and return the scalar
    loss buffer as a 1-tuple."""
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Pass 1: max-shift the logits per class group.
        triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        # Pass 2: full reduction into the scalar focal loss.
        triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1[grid(1)](
            buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del buf0
    return buf2,
class FocalLossNew(nn.Module):
    """Triton-compiled drop-in for FocalLoss; delegates to the fused kernels."""

    def __init__(self, focusing_param=2, balance_param=0.25):
        super(FocalLossNew, self).__init__()
        # NOTE: the compiled graph bakes in gamma=2, alpha=0.25; these
        # attributes are kept for interface parity.
        self.focusing_param = focusing_param
        self.balance_param = balance_param

    def forward(self, input_0, input_1):
        outputs = call([input_0, input_1])
        return outputs[0]
|
zwx8981/DBCNN-Pytorch
|
FocalLoss
| false
| 16,829
|
[
"MIT"
] | 150
|
16c3156054a30a3eabb45dffcf538f42452a14f3
|
https://github.com/zwx8981/DBCNN-Pytorch/tree/16c3156054a30a3eabb45dffcf538f42452a14f3
|
cross_entropy_prob
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class cross_entropy_prob(nn.Module):
    """Cross-entropy against soft (probability) targets, classes on dim 1."""

    def __init__(self):
        super(cross_entropy_prob, self).__init__()

    def forward(self, pred, soft_targets):
        """Return mean over batch/spatial dims of sum_c -t_c * log_softmax(p)_c.

        Fix vs original: `F.log_softmax(pred)` relied on the deprecated
        implicit-dim behavior; dim=1 is now explicit, matching the dim-1 sum
        below (identical result for the 4-D inputs this module is used with).
        """
        log_probs = F.log_softmax(pred, dim=1)
        return torch.mean(torch.sum(-soft_targets * log_probs, 1))
def get_inputs():
    """Sample forward() inputs for the benchmark harness."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # log-softmax pass 1 over dim 1 of a (4,4,4,4) tensor: subtract the
    # per-position max of the 4 class slices (stride 16 apart) for stability.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    # x0 = position within a (4,4) spatial plane, x2 = batch index.
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program reduction: finish log-softmax (log-sum-exp shared across
    # the 4 class terms), accumulate -t * log_softmax over the class dim
    # (the four unrolled terms below), sum the 64 positions, divide by 64.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp7 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp10 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp16 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp21 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp26 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp1 = -tmp0
    # Shared log-sum-exp of the max-shifted logits (pass 1 output).
    tmp3 = tl_math.exp(tmp2)
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp3 + tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tmp6 + tmp8
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp9 + tmp11
    tmp13 = tl_math.log(tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = tmp1 * tmp14
    tmp17 = -tmp16
    tmp18 = tmp4 - tmp13
    tmp19 = tmp17 * tmp18
    tmp20 = tmp15 + tmp19
    tmp22 = -tmp21
    tmp23 = tmp7 - tmp13
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 + tmp24
    tmp27 = -tmp26
    tmp28 = tmp10 - tmp13
    tmp29 = tmp27 * tmp28
    tmp30 = tmp25 + tmp29
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
    tmp33 = tl.sum(tmp31, 1)[:, None]
    # Mean over the 64 (batch x spatial) positions.
    tmp34 = 64.0
    tmp35 = tmp33 / tmp34
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
    """Inductor entry point for cross_entropy_prob: two-pass fused
    log-softmax + soft-target reduction on CUDA device 0; returns the
    scalar loss buffer as a 1-tuple."""
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Pass 1: max-shift the predictions per class group.
        triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2
        del buf2
        # Pass 2: full reduction into the mean soft-target cross-entropy.
        triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3,
            arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf0
    return buf3,
class cross_entropy_probNew(nn.Module):
    """Triton-compiled drop-in for cross_entropy_prob."""

    def __init__(self):
        super(cross_entropy_probNew, self).__init__()

    def forward(self, input_0, input_1):
        outputs = call([input_0, input_1])
        return outputs[0]
|
zwx8981/DBCNN-Pytorch
|
cross_entropy_prob
| false
| 16,830
|
[
"MIT"
] | 150
|
16c3156054a30a3eabb45dffcf538f42452a14f3
|
https://github.com/zwx8981/DBCNN-Pytorch/tree/16c3156054a30a3eabb45dffcf538f42452a14f3
|
SelfAttentionBlock
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> act -> drop -> Linear -> drop."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden/output widths default to the input width when omitted.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        """
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class SelfAttention(nn.Module):
    """Multi-head self-attention with optional absolute and untied positional
    encodings. With attn_pos_encoding_only=True, a single fused qkv projection
    is used and absolute encodings must not be supplied."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, x, q_ape, k_ape, attn_pos):
        """
        Args:
            x (torch.Tensor): (B, L, C)
            q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
        Returns:
            torch.Tensor: (B, L, C)
        """
        B, N, C = x.shape
        head_dim = C // self.num_heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            # Fused projection, then split into per-head q/k/v.
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim)
            qkv = qkv.permute(2, 0, 3, 1, 4)
            q, k, v = qkv.unbind(0)
        else:
            q_in = x if q_ape is None else x + q_ape
            k_in = x if k_ape is None else x + k_ape
            q = self.q(q_in).reshape(B, N, self.num_heads, head_dim
                ).permute(0, 2, 1, 3)
            k = self.k(k_in).reshape(B, -1, self.num_heads, head_dim
                ).permute(0, 2, 1, 3)
            v = self.v(x).reshape(B, -1, self.num_heads, head_dim
                ).permute(0, 2, 1, 3)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            attn = attn + attn_pos
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
class SelfAttentionBlock(nn.Module):
    """Pre-norm transformer encoder block: self-attention then MLP, each
    wrapped in a residual connection through `drop_path`."""

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
        False):
        # NOTE(review): the `drop_path=nn.Identity()` default is a shared
        # module instance across blocks; harmless since Identity is
        # stateless, but worth confirming upstream.
        super(SelfAttentionBlock, self).__init__()
        self.norm1 = norm_layer(dim)
        self.attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        self.drop_path = drop_path
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio
            ), act_layer=act_layer, drop=drop)

    def forward(self, x, q_ape, k_ape, attn_pos):
        """
        Args:
            x (torch.Tensor): (B, L, C)
            q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
        Returns:
            torch.Tensor: (B, L, C)
        """
        attended = self.attn(self.norm1(x), q_ape, k_ape, attn_pos)
        x = x + self.drop_path(attended)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
def get_inputs():
    """Sample (x, q_ape, k_ape, attn_pos) inputs for the benchmark harness."""
    shapes = [[4, 4, 4], [4, 4], [4, 4], [4, 4]]
    return [torch.rand(shape) for shape in shapes]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    return [[], dict(dim=4, num_heads=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics over the last dim (width 4, fully unrolled):
    # emits per-row mean (out_ptr0) and rsqrt(var + 1e-5) (out_ptr1).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    # Applies the LayerNorm affine transform (mean/rstd from the previous
    # kernel, weight in_ptr3, bias in_ptr4) and additionally emits the two
    # positionally-shifted variants (+ in_ptr5, + in_ptr6) — presumably the
    # q_ape / k_ape additions feeding the q and k projections.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr6 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp12 = tmp8 + tmp11
    tl.store(out_ptr0 + x4, tmp8, xmask)
    tl.store(out_ptr1 + x4, tmp10, xmask)
    tl.store(out_ptr2 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Strided copy that materializes a permuted layout (the head-split
    # reshape/transpose used for q/k/v and the attention output).
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_mul_3(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Softmax pass 1 over rows of length 4 of (attn * scale + attn_pos):
    # emits the per-row max (out_ptr0) and sum of exponentials (out_ptr1).
    # scale is baked in as 1.0 (head_dim ** -0.5 for head_dim == 1).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
        )
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp5 * tmp1
    tmp8 = tmp6 + tmp7
    tmp9 = triton_helpers.maximum(tmp4, tmp8)
    tmp11 = tmp10 * tmp1
    tmp13 = tmp11 + tmp12
    tmp14 = triton_helpers.maximum(tmp9, tmp13)
    tmp16 = tmp15 * tmp1
    tmp18 = tmp16 + tmp17
    tmp19 = triton_helpers.maximum(tmp14, tmp18)
    tmp20 = tmp4 - tmp19
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp8 - tmp19
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp19
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp24 + tmp26
    tmp28 = tmp18 - tmp19
    tmp29 = tl_math.exp(tmp28)
    tmp30 = tmp27 + tmp29
    tl.store(out_ptr0 + x2, tmp19, xmask)
    tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    # Softmax pass 2 (in place): exp(attn * 1.0 + pos - row_max) / row_sum,
    # using the per-row max (in_ptr1) and sum (in_ptr2) from pass 1.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex % 16
    x5 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 / tmp8
    tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics for the second norm, fused with the first
    # residual: for each row computes mean (out_ptr0) and raw variance
    # (out_ptr1, eps/rsqrt applied in the next kernel) of
    # x + (attn_out + proj_bias), fully unrolled over the 4 channels.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    tmp4 = tmp1 + tmp3
    tmp5 = tmp0 + tmp4
    tmp10 = tmp7 + tmp9
    tmp11 = tmp6 + tmp10
    tmp12 = tmp5 + tmp11
    tmp17 = tmp14 + tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp12 + tmp18
    tmp24 = tmp21 + tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Finishes norm2: recomputes the residual x + (attn_out + bias), applies
    # rsqrt(var + 1e-5) with the mean/variance from the previous kernel, then
    # the LayerNorm affine (weight in_ptr5, bias in_ptr6).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, XBLOCK: tl.constexpr):
    # Final fused residual: out = (x + (attn_out + attn_bias)) +
    # (mlp_out + mlp_bias), written in place over the MLP output buffer.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_out_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 + tmp7
    tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
    """Inductor entry point for SelfAttentionBlock: orchestrates the fused
    LayerNorm/attention/MLP kernels and the extern mm/bmm/addmm calls on CUDA
    device 0. Returns the block output followed by the saved tensors needed
    for backward."""
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (16, 4), (4, 1))
    assert_size_stride(primals_15, (16,), (1,))
    assert_size_stride(primals_16, (4, 16), (16, 1))
    assert_size_stride(primals_17, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # norm1 statistics (mean, rstd).
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        # Normalized x (buf2) plus its q/k positionally-shifted variants.
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf0,
            buf1, primals_1, primals_2, primals_4, primals_6, buf2, buf3,
            buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        del primals_4
        del primals_6
        # q / k / v linear projections.
        buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
        buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
        buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf7)
        # Head-split copies, then q @ k^T.
        buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf4, buf8, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf4
        triton_poi_fused_clone_2[grid(16, 4)](buf6, buf9, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf9, (16, 1, 4), (4, 0, 1), 0), out=buf10)
        # Two-pass softmax of (scores * scale + attn_pos).
        buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf6
        buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_mul_3[grid(64)](buf10, primals_9,
            buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf10
        triton_poi_fused__softmax_add_mul_4[grid(256)](buf13, primals_9,
            buf11, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        # attn @ v, merge heads, output projection.
        buf14 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf12
        triton_poi_fused_clone_2[grid(16, 4)](buf7, buf14, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf15 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0)
        del buf7
        extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15)
        buf16 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0)
        del buf11
        triton_poi_fused_clone_2[grid(16, 4)](buf15, buf16, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf17 = reinterpret_tensor(buf15, (16, 4), (4, 1), 0)
        del buf15
        extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17)
        # Residual + norm2 (statistics reuse buf0/buf1 storage).
        buf18 = buf1
        del buf1
        buf19 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf17,
            primals_11, buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3, buf17,
            primals_11, buf18, buf19, primals_12, primals_13, buf20, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf18
        del buf19
        del primals_13
        # MLP: fc1 (+bias) -> GELU -> fc2, then the final fused residual add.
        buf21 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_15, reinterpret_tensor(buf20, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_14, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf21)
        del primals_15
        buf22 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_gelu_7[grid(256)](buf21, buf22, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf23 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf22, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_16, (16, 4), (1, 16), 0), out=buf23)
        buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0)
        del buf23
        triton_poi_fused_add_8[grid(64)](buf24, primals_3, buf17,
            primals_11, primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_17
    # Output first, then tensors saved for the backward pass.
    return buf24, primals_3, primals_11, primals_12, reinterpret_tensor(buf3,
        (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4), (4, 1), 0
        ), reinterpret_tensor(buf2, (16, 4), (4, 1), 0
        ), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0
        ), buf17, reinterpret_tensor(buf20, (16, 4), (4, 1), 0
        ), buf21, reinterpret_tensor(buf22, (16, 16), (16, 1), 0
        ), primals_16, primals_14, primals_10, reinterpret_tensor(buf14, (
        16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4,
        1, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 4), 0
        ), primals_8, primals_7, primals_5
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> act -> drop -> Linear -> drop."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden/output widths default to the input width when omitted.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        """
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class SelfAttention(nn.Module):
    """Multi-head self-attention with optional untied positional encodings.

    When ``attn_pos_encoding_only`` is True, a single fused qkv projection is
    used and absolute positional encodings (q_ape/k_ape) must not be supplied;
    otherwise separate q/k/v projections are used and q_ape/k_ape are added to
    the inputs before projection.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default attention scale is 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # One fused projection producing q, k and v in a single matmul.
            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only
    def forward(self, x, q_ape, k_ape, attn_pos):
        """
        Args:
            x (torch.Tensor): (B, L, C)
            q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
        Returns:
            torch.Tensor: (B, L, C)
        """
        B, N, C = x.shape
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            # (B, N, 3, heads, head_dim) -> (3, B, heads, N, head_dim)
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.
                num_heads).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            # Absolute positional encodings are added before projection.
            q = x + q_ape if q_ape is not None else x
            q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads
                ).permute(0, 2, 1, 3)
            k = x + k_ape if k_ape is not None else x
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
                ).permute(0, 2, 1, 3)
            # v never receives an absolute positional encoding.
            v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads
                ).permute(0, 2, 1, 3)
        attn = q @ k.transpose(-2, -1)
        attn = attn * self.scale
        if attn_pos is not None:
            # Untied positional bias added to the raw attention logits.
            attn = attn + attn_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v
        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SelfAttentionBlockNew(nn.Module):
    """Transformer encoder block (pre-norm self-attention + MLP) whose forward
    pass delegates to the inductor-generated ``call`` defined earlier in this
    module instead of composing the submodules in eager mode.

    The submodules exist only to hold the learned parameters that ``call``
    consumes.
    """
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
        False):
        # NOTE(review): drop_path=nn.Identity() is a shared mutable default;
        # harmless because Identity is stateless, but flagged for awareness.
        super(SelfAttentionBlockNew, self).__init__()
        self.norm1 = norm_layer(dim)
        self.attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        self.drop_path = drop_path
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
    def forward(self, input_0, input_1, input_2, input_3):
        # Gather all parameters in the exact positional order expected by the
        # compiled `call` graph; input_0..input_3 are x and the positional
        # encodings of the original SelfAttentionBlock.forward.
        primals_1 = self.norm1.weight
        primals_2 = self.norm1.bias
        primals_4 = self.attn.q.weight
        primals_5 = self.attn.k.weight
        primals_6 = self.attn.v.weight
        primals_7 = self.attn.proj.weight
        primals_11 = self.attn.proj.bias
        primals_12 = self.norm2.weight
        primals_13 = self.norm2.bias
        primals_14 = self.mlp.fc1.weight
        primals_15 = self.mlp.fc1.bias
        primals_16 = self.mlp.fc2.weight
        primals_17 = self.mlp.fc2.bias
        primals_3 = input_0
        primals_8 = input_1
        primals_9 = input_2
        primals_10 = input_3
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        # call() returns the block output first, followed by saved tensors
        # for backward; only the output is exposed.
        return output[0]
|
zhangzhengde0225/SwinTrack
|
SelfAttentionBlock
| false
| 16,831
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
A2Block
|
import torch
import torch.nn as nn
class A2Block(nn.Module):
    """
    Double-attention block (A^2-Nets, NIPS 2018): gathers global feature
    descriptors with one softmax attention map and redistributes them with a
    second one, then adds the result back to the input.
    """

    def __init__(self, inplane, plane):
        super(A2Block, self).__init__()
        self.down = nn.Conv2d(inplane, plane, 1)
        self.up = nn.Conv2d(plane, inplane, 1)
        self.gather_down = nn.Conv2d(inplane, plane, 1)
        self.distribue_down = nn.Conv2d(inplane, plane, 1)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """Return x plus the double-attention feature map (same shape as x)."""
        identity = x
        feat = self.down(identity)
        gate = self.gather_down(identity)
        b, c, h, _ = feat.size()
        feat = feat.view(b, c, -1)
        # Gathering attention: softmax over spatial positions, transposed for bmm.
        gate = self.softmax(gate.view(b, c, -1)).permute(0, 2, 1)
        descriptors = torch.bmm(feat, gate)
        # Distribution attention over the same spatial grid.
        dist = self.distribue_down(identity)
        dist = self.softmax(dist.view(b, c, -1)).permute(0, 2, 1)
        attended = torch.bmm(dist, descriptors)
        attended = attended.permute(0, 2, 1).view(b, c, h, -1)
        return identity + self.up(attended)
def get_inputs():
    """Sample forward input: a single random NCHW tensor of shape (4, 4, 4, 4)."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments for A2Block as (positional args, keyword args)."""
    return [[], {'inplane': 4, 'plane': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after a 1x1 convolution: out[n, c, h, w] += bias[c].
    # The tensor is contiguous (4, 4, 4, 4), so the channel index of a flat
    # element is (index // 16) % 4.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # Fused conv-bias add + numerically stable softmax over the flattened
    # spatial axis (16 positions per (batch, channel) row).
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    # Per-channel bias broadcast across the spatial positions.
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    # Subtract the row max before exponentiating for stability.
    tmp5 = tl.where(xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
    tmp7 = tmp2 - tmp6
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.where(xmask, tmp9, 0)
    tmp12 = tl.sum(tmp11, 1)[:, None]
    tmp13 = tmp8 / tmp12
    tl.store(out_ptr2 + (r2 + 16 * x3), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Fused residual add: out = residual + (conv_out + bias), while also
    # re-laying-out in_ptr1 from channels-last-ish strides back to NCHW
    # (note the differing index expressions for the two loads).
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4
    y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
    tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
    tmp2 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
def call(args):
    """Inductor-compiled forward of A2Block.

    args (consumed and cleared): [x, down.weight, down.bias, up.weight,
    up.bias, gather_down.weight, gather_down.bias, distribue_down.weight,
    distribue_down.bias]. Returns the block output followed by tensors saved
    for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # A = down(x) (bias added below by the fused kernel).
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        # B = gather_down(x), bias fused into the softmax kernel.
        buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(256)](buf2, primals_3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        # buf5 = softmax(B + bias) over the spatial axis.
        buf5 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(16)](buf1, primals_5, buf5, 16, 16,
            XBLOCK=8, num_warps=2, num_stages=1)
        del primals_5
        # G = A @ B^T: global descriptors.
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1),
            0), reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), out=buf6)
        # C = distribue_down(x), bias fused into the softmax kernel.
        buf7 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
        buf10 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
        del buf1
        triton_per_fused__softmax_1[grid(16)](buf7, primals_7, buf10, 16,
            16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_7
        # atten = C^T @ G, written into buf7's storage (reused).
        buf11 = reinterpret_tensor(buf7, (4, 16, 4), (64, 4, 1), 0)
        del buf7
        extern_kernels.bmm(reinterpret_tensor(buf10, (4, 16, 4), (64, 1, 16
            ), 0), buf6, out=buf11)
        # up(atten); bias and residual add are fused in the final kernel.
        buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4,
            4, 4), (64, 1, 16, 4), 0), primals_8, stride=(1, 1), padding=(0,
            0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf12, (4, 4, 4, 4), (64, 1, 16, 4))
        buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_convolution_2[grid(16, 16)](primals_1, buf12,
            primals_9, buf13, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
            num_stages=1)
        del buf12
        del primals_9
    return (buf13, primals_1, primals_2, primals_4, primals_6, primals_8,
        buf5, buf10, reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 1, 16, 4),
        0), reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0))
class A2BlockNew(nn.Module):
    """
    Implementation of A2Block (NIPS 2018) whose forward pass runs the
    inductor-compiled ``call`` above instead of the eager submodules; the
    submodules exist only to hold the learned parameters.
    """
    def __init__(self, inplane, plane):
        super(A2BlockNew, self).__init__()
        self.down = nn.Conv2d(inplane, plane, 1)
        self.up = nn.Conv2d(plane, inplane, 1)
        self.gather_down = nn.Conv2d(inplane, plane, 1)
        self.distribue_down = nn.Conv2d(inplane, plane, 1)
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, input_0):
        # Pack parameters in the positional order expected by `call`.
        primals_2 = self.down.weight
        primals_3 = self.down.bias
        primals_4 = self.up.weight
        primals_5 = self.up.bias
        primals_6 = self.gather_down.weight
        primals_7 = self.gather_down.bias
        primals_8 = self.distribue_down.weight
        primals_9 = self.distribue_down.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        # Only the first element is the module output; the rest are saved
        # tensors for backward.
        return output[0]
|
zj1008/GALD-DGCNet
|
A2Block
| false
| 16,832
|
[
"MIT"
] | 127
|
be7ebfe2b3d28ea28a2b4714852999d4af2a785e
|
https://github.com/zj1008/GALD-DGCNet/tree/be7ebfe2b3d28ea28a2b4714852999d4af2a785e
|
BoundedSingleVar
|
import torch
class BoundedSingleVar(torch.nn.Module):
    """Wrap a single learnable scalar kept inside (lower_bound, upper_bound)
    by a sigmoid squashing, for unknown coefficients in inverse problems.

    :param lower_bound: The lower bound for the parameter.
    :type lower_bound: float
    :param upper_bound: The upper bound for the parameter.
    :type upper_bound: float
    """

    def __init__(self, lower_bound, upper_bound):
        super().__init__()
        self.value = torch.nn.Parameter(torch.Tensor([0.0]))
        self.layer = torch.nn.Sigmoid()
        self.ub, self.lb = upper_bound, lower_bound

    def forward(self, x) ->torch.Tensor:
        # x[:, :1] * 0.0 contributes nothing numerically but ties the output
        # to the input's batch shape/device via broadcasting.
        anchor = x[:, :1] * 0.0
        bounded = self.layer(self.value) * (self.ub - self.lb) + self.lb
        return anchor + bounded

    def get_value(self) ->torch.Tensor:
        bounded = self.layer(self.value) * (self.ub - self.lb) + self.lb
        return bounded
def get_inputs():
    """Sample forward input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments for BoundedSingleVar as (positional, keyword)."""
    return [[], {'lower_bound': 4, 'upper_bound': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Computes x[:, :1] * 0.0 + sigmoid(value) * (ub - lb) + lb with the
    # constants specialized for lower_bound == upper_bound == 4: the span
    # (ub - lb) folds to 0.0 (shared with the zeroing multiplier tmp1) and
    # lb folds to the literal 4.0 below.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    # x0 + 64 * x1 walks channel 0 of each batch element, i.e. x[:, :1].
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr1 + 0)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    tmp1 = 0.0
    tmp2 = tmp0 * tmp1
    tmp5 = tl.sigmoid(tmp4)
    tmp6 = tmp5 * tmp1
    tmp7 = tmp2 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 + tmp8
    tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
    """Inductor-compiled forward of BoundedSingleVar.

    args (consumed and cleared): [x, value]. Returns (output, value); the
    parameter is returned so autograd can reach it in backward.
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_sigmoid_0[grid(64)](primals_1, primals_2,
            buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
    return buf0, primals_2
class BoundedSingleVarNew(torch.nn.Module):
    """Wrap a single learnable scalar kept inside (lower_bound, upper_bound)
    by a sigmoid squashing; the forward pass runs the inductor-compiled
    ``call`` above.

    :param lower_bound: The lower bound for the parameter.
    :type lower_bound: float
    :param upper_bound: The upper bound for the parameter.
    :type upper_bound: float
    """
    def __init__(self, lower_bound, upper_bound):
        super().__init__()
        self.value = torch.nn.Parameter(torch.Tensor([0.0]))
        self.layer = torch.nn.Sigmoid()
        self.ub, self.lb = upper_bound, lower_bound
    def get_value(self) ->torch.Tensor:
        # Eager-mode helper; unaffected by the compiled forward path.
        return self.layer(self.value) * (self.ub - self.lb) + self.lb
    def forward(self, input_0):
        primals_2 = self.value
        primals_1 = input_0
        output = call([primals_1, primals_2])
        # output[1] is the raw parameter, kept only for backward.
        return output[0]
|
zweien/idrlnet
|
BoundedSingleVar
| false
| 16,833
|
[
"Apache-2.0"
] | 66
|
3a19a3301d565c0906aac84ff31eefcff75726a8
|
https://github.com/zweien/idrlnet/tree/3a19a3301d565c0906aac84ff31eefcff75726a8
|
FcCat
|
import torch
import torch.nn as nn
class FcCat(nn.Module):
    """Append a bias-free linear projection of the input along dimension 1."""

    def __init__(self, nIn, nOut):
        super(FcCat, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        """Return x concatenated with fc(x) along dim 1."""
        projected = self.fc(x)
        return torch.cat((x, projected), 1)
def get_inputs():
    """Sample forward input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments for FcCat as (positional, keyword)."""
    return [[], {'nIn': 4, 'nOut': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Concatenation along dim 1 of a (4, 8, 4, 4) output: channels 0-3 come
    # from in_ptr0 (the input x), channels 4-7 from in_ptr1 (the fc(x)
    # matmul result). The branch is selected per element with tl.where.
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    # Second half: shift the channel index back by 4 into the fc output.
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
    """Inductor-compiled forward of FcCat.

    args (consumed and cleared): [fc.weight, x]. Computes fc(x) as a flat
    matmul, then concatenates x and fc(x) along dim 1 with the fused kernel.
    Returns the concatenated tensor plus the flattened input saved for
    backward.
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # fc(x): (64, 4) @ weight^T via stride-swapped reinterpretation.
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
    return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class FcCatNew(nn.Module):
    """FcCat variant whose forward pass runs the inductor-compiled ``call``
    above; the Linear submodule only holds the weight it consumes.
    """
    def __init__(self, nIn, nOut):
        super(FcCatNew, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)
    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        # output[1] is the flattened input saved for backward.
        return output[0]
|
zwh930712/densenet.pytorch
|
FcCat
| false
| 16,834
|
[
"Apache-2.0"
] | 826
|
d1cd5e1957975628286e516512c6d1c14430f810
|
https://github.com/zwh930712/densenet.pytorch/tree/d1cd5e1957975628286e516512c6d1c14430f810
|
CrossAttentionBlock
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Falsy widths (None/0) fall back to the input width, matching the
        # common timm-style Mlp convention.
        out_features = in_features if not out_features else out_features
        hidden_features = in_features if not hidden_features else hidden_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        """
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class CrossAttention(nn.Module):
    """Multi-head cross-attention (queries from one sequence, keys/values
    from another) with optional untied positional encodings.

    When ``attn_pos_encoding_only`` is True, k and v come from one fused kv
    projection and absolute positional encodings must not be supplied.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default attention scale is 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            # Fused projection producing k and v in a single matmul.
            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only
    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        """
        Args:
            q (torch.Tensor): (B, L_q, C)
            kv (torch.Tensor): (B, L_kv, C)
            q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
        Returns:
            torch.Tensor: (B, L_q, C)
        """
        B, q_N, C = q.shape
        kv_N = kv.shape[1]
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
                ).permute(0, 2, 1, 3)
            # (B, L_kv, 2, heads, head_dim) -> (2, B, heads, L_kv, head_dim)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.
                num_heads).permute(2, 0, 3, 1, 4)
            k, v = kv[0], kv[1]
        else:
            # Absolute positional encodings are added before projection.
            q = q + q_ape if q_ape is not None else q
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
                ).permute(0, 2, 1, 3)
            k = kv + k_ape if k_ape is not None else kv
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
                ).permute(0, 2, 1, 3)
            # v never receives an absolute positional encoding.
            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads
                ).permute(0, 2, 1, 3)
        attn = q @ k.transpose(-2, -1)
        attn = attn * self.scale
        if attn_pos is not None:
            # Untied positional bias added to the raw attention logits.
            attn = attn + attn_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v
        x = x.transpose(1, 2).reshape(B, q_N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class CrossAttentionBlock(nn.Module):
    """Pre-norm transformer decoder-style block: cross-attention (queries
    attend to a separate kv sequence) followed by an MLP, each wrapped in a
    residual connection with optional drop-path.
    """
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
        False):
        # NOTE(review): drop_path=nn.Identity() is a shared mutable default;
        # harmless because Identity is stateless, but flagged for awareness.
        super(CrossAttentionBlock, self).__init__()
        # Separate norms for the query and key/value streams.
        self.norm1_q = norm_layer(dim)
        self.norm1_kv = norm_layer(dim)
        self.attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        self.drop_path = drop_path
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        """
        Args:
            q (torch.Tensor): (B, L_q, C)
            kv (torch.Tensor): (B, L_kv, C)
            q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
        Returns:
            torch.Tensor: (B, L_q, C)
        """
        # Residual 1: cross-attention over the normalized streams.
        q = q + self.drop_path(self.attn(self.norm1_q(q), self.norm1_kv(kv),
            q_ape, k_ape, attn_pos))
        # Residual 2: feed-forward on the normalized result.
        q = q + self.drop_path(self.mlp(self.norm2(q)))
        return q
def get_inputs():
    """Five random (4, 4, 4) tensors: q, kv, q_ape, k_ape, attn_pos."""
    return [torch.rand(4, 4, 4) for _ in range(5)]
def get_init_inputs():
    """Constructor arguments for CrossAttentionBlock as (positional, keyword)."""
    return [[], {'dim': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics over the last dimension (C = 4, fully unrolled):
    # writes per-row mean to out_ptr0 and rsqrt(var + 1e-5) to out_ptr1.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    # Biased variance: mean of squared deviations.
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
    ):
    # Applies the LayerNorm affine using precomputed mean (in_ptr1) and rstd
    # (in_ptr2): out_ptr0 = (x - mean) * rstd * weight + bias. Also stores
    # out_ptr1 = out_ptr0 + in_ptr5 (a same-shaped tensor added elementwise;
    # presumably the absolute positional encoding — confirm against call()).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x2, xmask)
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x2, tmp8, xmask)
    tl.store(out_ptr1 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Same computation as kernel _1 but only the final sum is stored:
    # out = ((x - mean) * rstd * weight + bias) + in_ptr5.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x2, xmask)
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Transposed copy: materializes a permuted view into a contiguous buffer
    # (used to lay out per-head q/k/v tensors for the batched matmuls).
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # First softmax pass over rows of length 4 (fully unrolled): for
    # logits = attn * scale + attn_pos (scale folded to the literal 1.0),
    # computes the per-row max (out_ptr0) and the sum of exp(logit - max)
    # (out_ptr1) for the normalization pass in kernel _5.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
        )
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp5 * tmp1
    tmp8 = tmp6 + tmp7
    tmp9 = triton_helpers.maximum(tmp4, tmp8)
    tmp11 = tmp10 * tmp1
    tmp13 = tmp11 + tmp12
    tmp14 = triton_helpers.maximum(tmp9, tmp13)
    tmp16 = tmp15 * tmp1
    tmp18 = tmp16 + tmp17
    tmp19 = triton_helpers.maximum(tmp14, tmp18)
    tmp20 = tmp4 - tmp19
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp8 - tmp19
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp19
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp24 + tmp26
    tmp28 = tmp18 - tmp19
    tmp29 = tl_math.exp(tmp28)
    tmp30 = tmp27 + tmp29
    tl.store(out_ptr0 + x2, tmp19, xmask)
    tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    # Second softmax pass: normalizes the logits in place using the row max
    # (in_ptr1) and exp-sum (in_ptr2) produced by kernel _4:
    # out = exp(attn * 1.0 + attn_pos - max) / sum.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex % 64
    x5 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 / tmp8
    tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused residual + LayerNorm statistics over C = 4 (fully unrolled):
    # for each row computes s_c = in_ptr0[c] + (in_ptr1[c] + in_ptr2[c])
    # (residual + projection output + per-channel bias), then stores the
    # row mean (out_ptr0) and biased variance (out_ptr1); eps and rsqrt are
    # applied by the follow-up kernel _7.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    tmp4 = tmp1 + tmp3
    tmp5 = tmp0 + tmp4
    tmp10 = tmp7 + tmp9
    tmp11 = tmp6 + tmp10
    tmp12 = tmp5 + tmp11
    tmp17 = tmp14 + tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp12 + tmp18
    tmp24 = tmp21 + tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Completes the fused residual LayerNorm started by kernel _6:
    # s = in_ptr0 + (in_ptr1 + per-channel bias in_ptr2), then
    # out = (s - mean) * rsqrt(var + 1e-5) * weight + bias
    # with mean/var per row (in_ptr3/in_ptr4) and affine params
    # per channel (in_ptr5/in_ptr6).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))).
    # 0.7071067811865476 is 1/sqrt(2).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, XBLOCK: tl.constexpr):
    # Final fused residual: in place,
    # out = (in_ptr0 + (in_ptr1 + bias in_ptr2)) + (in_out_ptr0 + bias in_ptr3)
    # i.e. the attention-residual sum plus the MLP output with its fc2 bias.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_out_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 + tmp7
    tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
    """Compiled forward of CrossAttentionBlockNew (all on CUDA device 0).

    Pipeline:
      1. LayerNorm statistics + normalization of q (primals_3) and kv
         (primals_6), fused with the addition of primals_7 / primals_9
         (presumably the absolute positional encodings — see the block's
         eager counterpart).
      2. q/k/v projections (mm), head split (clone kernels), attention
         scores with the additive bias primals_12, two-pass softmax,
         attention-weighted values (bmm), output projection.
      3. Residual add + second LayerNorm, MLP (fc1 -> GELU -> fc2), and a
         final in-place residual add into buf26.

    Returns the block output (buf26) followed by the intermediates and
    parameters needed for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (16, 4), (4, 1))
    assert_size_stride(primals_18, (16,), (1,))
    assert_size_stride(primals_19, (4, 16), (16, 1))
    assert_size_stride(primals_20, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # LayerNorm mean/variance statistics for q (buf0/buf1) and kv (buf2/buf3).
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_6, buf2,
            buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_6, buf2,
            buf3, primals_4, primals_5, primals_9, buf4, buf7, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        del buf2
        del buf3
        del primals_4
        del primals_5
        del primals_9
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(64)](primals_3, buf0,
            buf1, primals_1, primals_2, primals_7, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        del primals_7
        # q/k/v projections: buf6 = q-proj, buf8 = k-proj, buf9 = v-proj.
        buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
        buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
        buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_3[grid(16, 4)](buf6, buf10, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf11 = reinterpret_tensor(buf6, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf6
        triton_poi_fused_clone_3[grid(16, 4)](buf8, buf11, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        # Attention scores q @ k^T, then two-pass (max/sum + normalize) softmax
        # with the additive bias primals_12.
        buf12 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf11, (16, 1, 4), (4, 0, 1), 0), out=buf12)
        buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf8
        buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_mul_4[grid(64)](buf12, primals_12,
            buf13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf15 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf12
        triton_poi_fused__softmax_add_mul_5[grid(256)](buf15, primals_12,
            buf13, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_12
        buf16 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf14
        triton_poi_fused_clone_3[grid(16, 4)](buf9, buf16, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf17 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0)
        del buf9
        extern_kernels.bmm(reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf16, (16, 4, 1), (4, 1, 0), 0), out=buf17)
        buf18 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0)
        del buf13
        triton_poi_fused_clone_3[grid(16, 4)](buf17, buf18, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        # Output projection of the attended values.
        buf19 = reinterpret_tensor(buf17, (16, 4), (4, 1), 0)
        del buf17
        extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
        # Second LayerNorm (residual + attention output), reusing buf0/buf1
        # for its statistics.
        buf20 = buf1
        del buf1
        buf21 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_3, buf19,
            primals_14, buf20, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_3, buf19,
            primals_14, buf20, buf21, primals_15, primals_16, buf22, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf20
        del buf21
        del primals_16
        # MLP: fc1 (addmm) -> GELU -> fc2 (mm), then final residual add.
        buf23 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_18, reinterpret_tensor(buf22, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_17, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf23)
        del primals_18
        buf24 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_gelu_8[grid(256)](buf23, buf24, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf24, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_19, (16, 4), (1, 16), 0), out=buf25)
        buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
        del buf25
        triton_poi_fused_add_9[grid(64)](buf26, primals_3, buf19,
            primals_14, primals_20, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_20
    return (buf26, primals_3, primals_6, primals_14, primals_15,
        reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(
        buf7, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 4), (4, 1),
        0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf19,
        reinterpret_tensor(buf22, (16, 4), (4, 1), 0), buf23,
        reinterpret_tensor(buf24, (16, 16), (16, 1), 0), primals_19,
        primals_17, primals_13, reinterpret_tensor(buf16, (16, 1, 4), (4, 1,
        1), 0), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 4), 0), primals_11,
        primals_10, primals_8)
class Mlp(nn.Module):
    """Two-layer feed-forward network (Linear -> activation -> Linear)
    with dropout after both projections, as used inside transformer blocks.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Unspecified sizes default to the input width.
        hidden = hidden_features if hidden_features else in_features
        out = out_features if out_features else in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        """
        # The same Dropout module is applied after the activation and again
        # after the second projection.
        for stage in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = stage(x)
        return x
class CrossAttention(nn.Module):
    """Multi-head cross attention between a query sequence and a key/value
    sequence, with optional absolute (q_ape/k_ape) and untied (attn_pos)
    positional encodings."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # Single fused projection for k and v.
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        """
        Args:
            q (torch.Tensor): (B, L_q, C)
            kv (torch.Tensor): (B, L_kv, C)
            q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
        Returns:
            torch.Tensor: (B, L_q, C)
        """
        B, q_N, C = q.shape
        kv_N = kv.shape[1]
        heads = self.num_heads
        d_head = C // heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, heads, d_head).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, heads, d_head).permute(
                2, 0, 3, 1, 4)
            k, v = kv[0], kv[1]
        else:
            q_in = q if q_ape is None else q + q_ape
            q = self.q(q_in).reshape(B, q_N, heads, d_head).permute(0, 2, 1, 3)
            k_in = kv if k_ape is None else kv + k_ape
            k = self.k(k_in).reshape(B, -1, heads, d_head).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, heads, d_head).permute(0, 2, 1, 3)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            scores = scores + attn_pos
        attn = self.attn_drop(scores.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(B, q_N, C)
        return self.proj_drop(self.proj(out))
class CrossAttentionBlockNew(nn.Module):
    """Transformer-style cross-attention block (norm -> cross-attn ->
    residual -> norm -> MLP -> residual) whose forward dispatches to the
    fused Triton `call` graph instead of running the submodules eagerly.

    NOTE(review): `call` only wires up q/k/v weights (no biases) plus the
    untied attention bias, so it presumably assumes qkv_bias=False and
    attn_pos_encoding_only=False — confirm before constructing with other
    settings. Also note drop_path=nn.Identity() is a mutable default
    module shared across instances.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
        False):
        super(CrossAttentionBlockNew, self).__init__()
        self.norm1_q = norm_layer(dim)
        self.norm1_kv = norm_layer(dim)
        self.attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        self.drop_path = drop_path
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)

    def forward(self, input_0, input_1, input_2, input_3, input_4):
        # Collect parameters and inputs in the positional slots expected by
        # the compiled `call` graph; input_0/input_1 are q/kv, input_2/3
        # presumably the absolute positional encodings and input_4 the
        # untied attention bias (see `call`).
        primals_1 = self.norm1_q.weight
        primals_2 = self.norm1_q.bias
        primals_4 = self.norm1_kv.weight
        primals_5 = self.norm1_kv.bias
        primals_8 = self.attn.q.weight
        primals_10 = self.attn.k.weight
        primals_11 = self.attn.v.weight
        primals_13 = self.attn.proj.weight
        primals_14 = self.attn.proj.bias
        primals_15 = self.norm2.weight
        primals_16 = self.norm2.bias
        primals_17 = self.mlp.fc1.weight
        primals_18 = self.mlp.fc1.bias
        primals_19 = self.mlp.fc2.weight
        primals_20 = self.mlp.fc2.bias
        primals_3 = input_0
        primals_6 = input_1
        primals_7 = input_2
        primals_9 = input_3
        primals_12 = input_4
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20])
        return output[0]
|
zhangzhengde0225/SwinTrack
|
CrossAttentionBlock
| false
| 16,835
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
Net
|
import torch
import torch.nn as nn
class FcCat(nn.Module):
    """Bias-free linear projection whose output is concatenated onto its
    input along dim 1 (dense-net style feature growth)."""

    def __init__(self, nIn, nOut):
        super(FcCat, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        projected = self.fc(x)
        return torch.cat((x, projected), 1)
class Net(nn.Module):
    """Two stacked FcCat layers: the feature dimension grows from
    nFeatures to nFeatures + nHidden1 + nHidden2."""

    def __init__(self, nFeatures, nHidden1, nHidden2):
        super(Net, self).__init__()
        self.l1 = FcCat(nFeatures, nHidden1)
        self.l2 = FcCat(nFeatures + nHidden1, nHidden2)

    def forward(self, x):
        return self.l2(self.l1(x))
def get_inputs():
    """Sample forward arguments for Net."""
    return [torch.rand(4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for Net."""
    return [[], dict(nFeatures=4, nHidden1=4, nHidden2=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Copy a contiguous (4, 4) tensor into the first four columns of a
    row-major (4, 8) buffer — the identity half of torch.cat((x, fc(x)), 1);
    the mm kernel writes the other half directly into columns 4:8."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # source column
    x1 = xindex // 4  # source row
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)  # dest row stride is 8
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Copy a contiguous (4, 8) tensor into the first eight columns of a
    row-major (4, 12) buffer — the identity half of the second concat."""
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 8  # source column
    x1 = xindex // 8  # source row
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)  # dest row stride is 12
def call(args):
    """Run the compiled NetNew forward graph.

    Both torch.cat operations are realized without a separate concat pass:
    each output buffer is allocated at the concatenated size up front, and
    the matmul result / input copy are written into disjoint column slices
    of it via reinterpret_tensor views.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf2 holds cat((x, l1.fc(x)), 1): mm fills columns 4:8, the copy
        # kernel fills columns 0:4.
        buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4)
        extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # buf5 holds cat((buf2, l2.fc(buf2)), 1) in the same slice-write style.
        buf5 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        buf3 = reinterpret_tensor(buf5, (4, 4), (12, 1), 8)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8
            ), 0), out=buf3)
        buf4 = reinterpret_tensor(buf5, (4, 8), (12, 1), 0)
        triton_poi_fused_cat_1[grid(32)](buf2, buf4, 32, XBLOCK=32,
            num_warps=1, num_stages=1)
    return buf5, primals_2, buf2, primals_3
class FcCat(nn.Module):
    """Bias-free linear projection whose output is concatenated onto its
    input along dim 1 (dense-net style feature growth)."""

    def __init__(self, nIn, nOut):
        super(FcCat, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        projected = self.fc(x)
        return torch.cat((x, projected), 1)
class NetNew(nn.Module):
    """Compiled variant of Net: forward dispatches to the Inductor `call`
    graph instead of running the FcCat submodules eagerly."""

    def __init__(self, nFeatures, nHidden1, nHidden2):
        super(NetNew, self).__init__()
        self.l1 = FcCat(nFeatures, nHidden1)
        self.l2 = FcCat(nFeatures + nHidden1, nHidden2)

    def forward(self, input_0):
        # `call` expects (l1 weight, input, l2 weight) positionally.
        primals_1 = self.l1.fc.weight
        primals_3 = self.l2.fc.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
zwh930712/densenet.pytorch
|
Net
| false
| 16,836
|
[
"Apache-2.0"
] | 826
|
d1cd5e1957975628286e516512c6d1c14430f810
|
https://github.com/zwh930712/densenet.pytorch/tree/d1cd5e1957975628286e516512c6d1c14430f810
|
SpatialSoftmaxBZ
|
import torch
import numpy as np
import torch.nn.functional as F
class SpatialSoftmaxBZ(torch.nn.Module):
    """
    IMPORTANT:
    i in [0, 1], where 0 is at the bottom, 1 is at the top
    j in [-1, 1]
    """

    def __init__(self, height, width):
        super().__init__()
        self.height = height
        self.width = width
        # Coordinate grids over the flattened feature map, registered as
        # frozen parameters so they move with the module's device/dtype.
        pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self.height),
            np.linspace(-1.0, 1.0, self.width))
        self.pos_x = torch.nn.Parameter(
            torch.from_numpy(pos_x).reshape(-1).float(), requires_grad=False)
        self.pos_y = torch.nn.Parameter(
            torch.from_numpy(pos_y).reshape(-1).float(), requires_grad=False)

    def forward(self, feature):
        # Softmax over the flattened spatial grid, per (batch, channel).
        weights = F.softmax(
            feature.view(feature.shape[0], feature.shape[1], -1), dim=-1)
        # Expected i-coordinate, remapped from [-1, 1] to [0, 1] (flipped).
        coord_i = (-(self.pos_y * weights).sum(dim=-1) + 1) / 2.0
        # Expected j-coordinate, kept in [-1, 1].
        coord_j = (self.pos_x * weights).sum(dim=-1)
        return torch.stack([coord_i, coord_j], dim=2)
def get_inputs():
    """Sample forward arguments for SpatialSoftmaxBZ."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for SpatialSoftmaxBZ."""
    return [[], dict(height=4, width=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_mul_stack_sum_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Fused spatial softmax: for each of the 16 (batch, channel) rows,
    softmax over the 16 flattened spatial positions of in_ptr0, then the
    two expected coordinates sum(pos_y * p) and sum(pos_x * p); the first
    is remapped via (1 - e) / 2. Results are written with stride 2 so they
    land pre-stacked in the (B, C, 2) output."""
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp11 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]  # row max for numeric stability
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp12 = tmp6 / tmp10  # softmax probabilities
    tmp13 = tmp11 * tmp12
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp16 = tl.where(xmask, tmp14, 0)
    tmp17 = tl.sum(tmp16, 1)[:, None]  # expected in_ptr1 coordinate
    tmp19 = tmp18 * tmp12
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
    tmp22 = tl.where(xmask, tmp20, 0)
    tmp23 = tl.sum(tmp22, 1)[:, None]  # expected in_ptr2 coordinate
    tmp24 = -tmp17
    tmp25 = 1.0
    tmp26 = tmp24 + tmp25
    tmp27 = 0.5
    tmp28 = tmp26 * tmp27  # (1 - e) / 2 remap
    tl.store(out_ptr4 + 2 * x0, tmp28, xmask)
    tl.store(out_ptr5 + 2 * x0, tmp23, xmask)
def call(args):
    """SpatialSoftmaxBZNew forward: one fused kernel computes the softmax
    and both expected coordinates, writing straight into a (4, 4, 2) output
    through two interleaved column views (offsets 0 and 1, stride 2)."""
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (16,), (1,))
    assert_size_stride(arg2_1, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
        buf4 = reinterpret_tensor(buf6, (4, 4, 1), (8, 2, 1), 0)
        buf5 = reinterpret_tensor(buf6, (4, 4, 1), (8, 2, 1), 1)
        get_raw_stream(0)
        triton_per_fused__softmax_mul_stack_sum_0[grid(16)](arg0_1, arg1_1,
            arg2_1, buf4, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf6,
class SpatialSoftmaxBZNew(torch.nn.Module):
    """
    IMPORTANT:
    i in [0, 1], where 0 is at the bottom, 1 is at the top
    j in [-1, 1]
    """

    def __init__(self, height, width):
        super().__init__()
        self.height = height
        self.width = width
        # Coordinate grids over the flattened feature map, stored as frozen
        # parameters so they follow the module's device/dtype.
        pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self.height), np.
            linspace(-1.0, 1.0, self.width))
        self.pos_x = torch.from_numpy(pos_x).reshape(-1).float()
        self.pos_x = torch.nn.Parameter(self.pos_x, requires_grad=False)
        self.pos_y = torch.from_numpy(pos_y).reshape(-1).float()
        self.pos_y = torch.nn.Parameter(self.pos_y, requires_grad=False)

    def forward(self, input_0):
        # Route the input and the coordinate grids into the fused Triton
        # kernel, which performs the softmax and both expected-coordinate
        # reductions in a single pass.
        arg1_1 = self.pos_x
        arg2_1 = self.pos_y
        arg0_1 = input_0
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
|
zwc662/SequentialAttack
|
SpatialSoftmaxBZ
| false
| 16,837
|
[
"MIT"
] | 116
|
677b19c51ea76d794939ee126fccd75ffa0e6fe6
|
https://github.com/zwc662/SequentialAttack/tree/677b19c51ea76d794939ee126fccd75ffa0e6fe6
|
AttentionLayer
|
import torch
import torch.nn.functional as F
import torch.utils.data
import torch.distributed
import torch.nn as nn
import torch.optim
import torch.optim.lr_scheduler
def Linear(in_features, out_features, bias=True, dropout=0):
    """Linear layer (input: N x T x C) with all parameters drawn from
    U(-0.1, 0.1); `dropout` is accepted for API compatibility but unused."""
    layer = nn.Linear(in_features, out_features, bias=bias)
    # parameters() yields weight then (optionally) bias, matching the
    # original initialization order.
    for param in layer.parameters():
        param.data.uniform_(-0.1, 0.1)
    return layer
class AttentionLayer(nn.Module):
    """Luong-style attention: scores the source hidden states against a
    projected input, pools them with softmax weights, and combines the
    context with the input through an output projection + tanh."""

    def __init__(self, input_embed_dim, output_embed_dim):
        super().__init__()
        self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
        self.output_proj = Linear(2 * output_embed_dim, output_embed_dim,
            bias=False)

    def forward(self, input, source_hids):
        projected = self.input_proj(input)
        # Dot-product score of every source state against the projected input.
        attn_scores = (source_hids * projected.unsqueeze(0)).sum(dim=2)
        attn_scores = F.softmax(attn_scores.t(), dim=1).t()
        # Attention-weighted sum of the source states (the context vector).
        context = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
        combined = torch.cat((context, input), dim=1)
        return F.tanh(self.output_proj(combined)), attn_scores
def get_inputs():
    """Sample forward arguments for AttentionLayer."""
    return [torch.rand(4, 4), torch.rand(4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for AttentionLayer."""
    return [[], dict(input_embed_dim=4, output_embed_dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.distributed
import torch.nn as nn
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """Per-row dot product of in_ptr0 and in_ptr1 (sum over 4 columns),
    followed by a softmax that is degenerate for this 4x4 trace: each
    transposed softmax row has length 1, so exp(s - s) / itself is 1.0."""
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13  # dot product over the 4 columns
    tmp15 = tmp14 - tmp14  # softmax shift with a single element: s - s
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tmp16 / tmp16  # normalize by itself: always exactly 1.0
    tl.store(in_out_ptr0 + x0, tmp17, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Assemble cat((attn * source_hids, input), dim=1): columns 0-3 take
    the per-row attention weight (in_ptr0) times the source hidden state
    (in_ptr1); columns 4-7 copy the raw input (in_ptr2)."""
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8  # destination column
    x1 = xindex // 8  # destination row
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # True for the weighted-context half
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp7 = tmp5 * tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp13 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp10 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Elementwise tanh applied in place over the 16 projected values."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
    """AttentionLayerNew forward: input projection (mm), fused score +
    softmax kernel (degenerate length-1 softmax producing weights of 1.0),
    concat of the weighted source states with the raw input, output
    projection (mm, reusing buf0's storage) and an in-place tanh."""
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 8), (8, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
        buf2 = reinterpret_tensor(buf1, (4, 1), (1, 1), 0)
        del buf1
        get_raw_stream(0)
        triton_poi_fused__softmax_mul_sum_0[grid(4)](buf2, primals_3, buf0,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_cat_1[grid(32)](buf2, primals_3, primals_2, buf3,
            32, XBLOCK=32, num_warps=1, num_stages=1)
        buf4 = buf0
        del buf0
        extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (8, 4), (1, 8
            ), 0), out=buf4)
        buf5 = buf4
        del buf4
        triton_poi_fused_tanh_2[grid(16)](buf5, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
    return buf5, reinterpret_tensor(buf2, (1, 4), (1, 1), 0
        ), primals_2, primals_3, buf2, buf3, buf5, primals_4
def Linear(in_features, out_features, bias=True, dropout=0):
    """Linear layer (input: N x T x C) with all parameters drawn from
    U(-0.1, 0.1); `dropout` is accepted for API compatibility but unused."""
    layer = nn.Linear(in_features, out_features, bias=bias)
    # parameters() yields weight then (optionally) bias, matching the
    # original initialization order.
    for param in layer.parameters():
        param.data.uniform_(-0.1, 0.1)
    return layer
class AttentionLayerNew(nn.Module):
    """Compiled variant of AttentionLayer: forward routes the projection
    weights and both inputs through the Inductor `call` graph and returns
    (output, attn_scores)."""

    def __init__(self, input_embed_dim, output_embed_dim):
        super().__init__()
        self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
        self.output_proj = Linear(2 * output_embed_dim, output_embed_dim,
            bias=False)

    def forward(self, input_0, input_1):
        # `call` expects (input_proj weight, input, source_hids,
        # output_proj weight) positionally.
        primals_1 = self.input_proj.weight
        primals_4 = self.output_proj.weight
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0], output[1]
|
zsquaredz/XSum
|
AttentionLayer
| false
| 16,838
|
[
"MIT"
] | 235
|
10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
https://github.com/zsquaredz/XSum/tree/10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
PSNR
|
import torch
from torch.nn.modules.loss import _Loss
class PSNR(_Loss):
    """Peak signal-to-noise ratio between two images in [0, 1], computed
    after quantization to 8-bit pixel values and averaged over the batch.

    Accepts 3-D (C, H, W), 4-D (B, C, H, W) or 5-D (B, T, C, H, W) inputs.
    """

    def __init__(self):
        super(PSNR, self).__init__()
        self.val_range = 255  # 8-bit pixel range

    def _quantize(self, img):
        """Map [0, 1] floats to rounded pixel values in [0, 255]."""
        scaled = img * self.val_range
        return scaled.clamp(0, self.val_range).round()

    def forward(self, x, y):
        diff = self._quantize(x) - self._quantize(y)
        # Number of images to average over, derived from the layout.
        dims = x.dim()
        if dims == 3:
            n = 1
        elif dims == 4:
            n = x.size(0)
        elif dims == 5:
            n = x.size(0) * x.size(1)
        normalized_sq = diff.div(self.val_range).pow(2)
        mse = normalized_sq.view(n, -1).mean(dim=-1)
        psnr = -10 * mse.log10()
        return psnr.mean()
def get_inputs():
    """Sample forward arguments for PSNR."""
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for PSNR."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    """Per-image squared-error reduction for PSNR: quantize both inputs
    (x * 255, clamp to [0, 255], round half-to-even), take (diff / 255)^2
    and sum the 64 elements of each of the 4 images. The /64 mean is
    applied by the follow-up kernel."""
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = 255.0
    tmp2 = tmp0 * tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = triton_helpers.minimum(tmp4, tmp1)
    tmp6 = libdevice.nearbyint(tmp5)  # round half-to-even, like torch.round
    tmp8 = tmp7 * tmp1
    tmp9 = triton_helpers.maximum(tmp8, tmp3)
    tmp10 = triton_helpers.minimum(tmp9, tmp1)
    tmp11 = libdevice.nearbyint(tmp10)
    tmp12 = tmp6 - tmp11
    tmp13 = 0.00392156862745098  # 1 / 255
    tmp14 = tmp12 * tmp13
    tmp15 = tmp14 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.where(xmask, tmp16, 0)
    tmp19 = tl.sum(tmp18, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp19, xmask)
@triton.jit
def triton_per_fused_log10_mean_mul_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    """Finish the PSNR: mse = sum / 64, psnr = -10 * log10(mse), then
    average the 4 per-image values into a single scalar stored in place."""
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 64.0
    tmp2 = tmp0 / tmp1  # per-image MSE (64 elements per image)
    tmp3 = libdevice.log10(tmp2)
    tmp4 = -10.0
    tmp5 = tmp3 * tmp4
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9  # mean over the 4 images
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
    """PSNRNew forward: per-image sum of squared normalized differences,
    then mse = sum/64, psnr = -10*log10(mse), averaged over the batch of 4."""
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(4)](arg0_1, arg1_1, buf0, 4, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused_log10_mean_mul_1[grid(1)](buf2, buf0, 1, 4, XBLOCK
            =1, num_warps=2, num_stages=1)
        del buf0
    return buf2,
class PSNRNew(_Loss):
    """Compiled variant of PSNR: forward dispatches to the fused Triton
    `call` pair instead of computing PSNR eagerly."""

    def __init__(self):
        super(PSNRNew, self).__init__()
        self.val_range = 255  # 8-bit pixel range

    def _quantize(self, img):
        """Map [0, 1] floats to rounded pixel values in [0, 255].

        NOTE(review): kept for API parity; the fused `call` path below
        re-implements this quantization inside the Triton kernel.
        """
        img = img * self.val_range
        img = img.clamp(0, self.val_range).round()
        return img

    def forward(self, input_0, input_1):
        # Batched PSNR via the compiled kernel pair (per-image reduction,
        # then -10*log10 and batch mean).
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
zzh-tech/RSCD
|
PSNR
| false
| 16,839
|
[
"MIT"
] | 57
|
b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e
|
https://github.com/zzh-tech/RSCD/tree/b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e
|
DenseLayer
|
import torch
import torch.nn as nn
def actFunc(act, *args, **kwargs):
    """Return a fresh activation module selected by name (case-insensitive).

    *args/**kwargs are accepted for interface compatibility but ignored,
    matching the original behavior. Raises NotImplementedError for
    unrecognized names.
    """
    factories = {
        'relu': nn.ReLU,
        'relu6': nn.ReLU6,
        'leakyrelu': lambda: nn.LeakyReLU(0.1),
        'prelu': nn.PReLU,
        'rrelu': lambda: nn.RReLU(0.1, 0.3),
        'selu': nn.SELU,
        'celu': nn.CELU,
        'elu': nn.ELU,
        'gelu': nn.GELU,
        'tanh': nn.Tanh,
    }
    key = act.lower()
    if key not in factories:
        raise NotImplementedError
    return factories[key]()
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (padding tracks dilation; no bias)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
        stride=stride, padding=dilation, dilation=dilation,
        groups=groups, bias=False)
class DenseLayer(nn.Module):
    """
    Dense layer for residual dense block
    """

    def __init__(self, in_chs, growth_rate, activation='relu'):
        super(DenseLayer, self).__init__()
        self.conv = conv3x3(in_chs, growth_rate)
        self.act = actFunc(activation)

    def forward(self, x):
        # New features are appended channel-wise onto the input
        # (dense-net style growth by `growth_rate` channels).
        new_features = self.act(self.conv(x))
        return torch.cat((x, new_features), 1)
def get_inputs():
    """Sample forward arguments for DenseLayer."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for DenseLayer."""
    return [[], dict(in_chs=4, growth_rate=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Fused ReLU + channel concat for DenseLayerNew: channels 0-3 of the
    (4, 8, 4, 4) output copy the input (in_ptr0); channels 4-7 take
    max(0, conv_out) from in_ptr1."""
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8  # output channel
    x0 = xindex % 16  # spatial position (4 * 4)
    x2 = xindex // 128  # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # True for the pass-through (input) half
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.full([1], 0, tl.int32)
    tmp11 = triton_helpers.maximum(tmp10, tmp9)  # ReLU
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp6, tmp11, tmp12)
    tmp14 = tl.where(tmp4, tmp5, tmp13)
    tl.store(out_ptr0 + x3, tmp14, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Bool mask (relu(x) <= 0), saved so the backward pass can zero the
    upstream gradients where the ReLU was inactive."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
    # TorchInductor-compiled forward of DenseLayer: extern conv3x3, then a
    # fused relu+concat Triton kernel. Returns the concatenated activation
    # plus tensors saved for the backward pass. CUDA-only.
    primals_1, primals_2 = args
    args.clear()
    # primals_1: conv weight (4, 4, 3, 3); primals_2: input (4, 4, 4, 4).
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # buf1 = cat((input, relu(conv)), dim=1).
        triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # buf2 = mask of non-positive pre-activations, saved for ReLU backward.
        triton_poi_fused_relu_threshold_backward_1[grid(256)](buf0, buf2,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
    return buf1, primals_1, primals_2, buf2
def actFunc(act, *args, **kwargs):
    """Build an activation module from a case-insensitive name.

    Raises:
        NotImplementedError: if the name is not one of the supported
            activations.
    """
    factories = {
        'relu': nn.ReLU,
        'relu6': nn.ReLU6,
        'leakyrelu': lambda: nn.LeakyReLU(0.1),
        'prelu': nn.PReLU,
        'rrelu': lambda: nn.RReLU(0.1, 0.3),
        'selu': nn.SELU,
        'celu': nn.CELU,
        'elu': nn.ELU,
        'gelu': nn.GELU,
        'tanh': nn.Tanh,
    }
    key = act.lower()
    if key not in factories:
        raise NotImplementedError
    return factories[key]()
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    # No bias; padding equals dilation so a stride-1 conv keeps spatial size.
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
class DenseLayerNew(nn.Module):
    """
    Dense layer for residual dense block, forward driven by the compiled
    TorchInductor graph in ``call`` (CUDA-only).
    """

    def __init__(self, in_chs, growth_rate, activation='relu'):
        super(DenseLayerNew, self).__init__()
        self.conv = conv3x3(in_chs, growth_rate)
        self.act = actFunc(activation)

    def forward(self, input_0):
        kernel = self.conv.weight
        outputs = call([kernel, input_0])
        return outputs[0]
|
zzh-tech/RSCD
|
DenseLayer
| false
| 16,840
|
[
"MIT"
] | 57
|
b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e
|
https://github.com/zzh-tech/RSCD/tree/b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e
|
TxtNet
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class TxtNet(nn.Module):
    """Text encoding network producing a feature, raw code logits, and a
    tanh-squashed hash code.

    Args:
        code_len: length of the output hash code.
        txt_feat_len: dimensionality of the input text feature.
    """

    def __init__(self, code_len, txt_feat_len):
        super(TxtNet, self).__init__()
        self.fc1 = nn.Linear(txt_feat_len, 4096)
        self.fc2 = nn.Linear(4096, code_len)
        # Sharpness of the tanh; annealed over epochs via set_alpha().
        self.alpha = 1.0

    def forward(self, x):
        """Return (feat, hid, code): ReLU feature, raw logits, tanh code."""
        feat = F.relu(self.fc1(x))
        hid = self.fc2(feat)
        # Bug fix: F.tanh is deprecated (removed in newer PyTorch releases);
        # torch.tanh is the supported, numerically identical replacement.
        code = torch.tanh(self.alpha * hid)
        return feat, hid, code

    def set_alpha(self, epoch):
        """Anneal alpha = sqrt(epoch + 1)."""
        self.alpha = math.pow(1.0 * epoch + 1.0, 0.5)
def get_inputs():
    """Sample forward input for TxtNet: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Sample constructor arguments for TxtNet: (args, kwargs)."""
    init_kwargs = dict(code_len=4, txt_feat_len=4)
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# TorchInductor-generated kernel: in-place bias add + ReLU for the fc1
# output (bias broadcast over the 4096-wide feature axis). No mask is
# needed because the launch covers the tensor exactly.
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 4096
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    # relu(x + bias), written back in place.
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
# TorchInductor-generated kernel: elementwise tanh(alpha * x) with the
# alpha==1.0 constant folded in (from TxtNet.forward's code computation).
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
    # TorchInductor-compiled forward of TxtNet: mm (fc1) -> fused
    # bias+ReLU -> addmm (fc2) -> fused mul+tanh. Returns the three forward
    # outputs plus tensors saved for backward. CUDA-only.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    # fc1.weight, fc1.bias, input, fc2.weight, fc2.bias.
    assert_size_stride(primals_1, (4096, 4), (4, 1))
    assert_size_stride(primals_2, (4096,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4096), (4096, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
        # fc1 matmul on the input flattened to (64, 4).
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384,
            4096, 1), 0)
        del buf0
        get_raw_stream(0)
        # In-place bias + ReLU over all 262144 elements.
        triton_poi_fused_relu_0[grid(262144)](buf1, primals_2, 262144,
            XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # fc2 as addmm: bias + feat @ W2^T.
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4096),
            (4096, 1), 0), reinterpret_tensor(primals_4, (4096, 4), (1,
            4096), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # code = tanh(1.0 * hid).
        triton_poi_fused_mul_tanh_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf1, buf3, primals_4
class TxtNetNew(nn.Module):
    """Text hashing network whose forward runs the compiled TorchInductor
    graph in ``call`` (CUDA-only). Returns (feat, hid, code)."""

    def __init__(self, code_len, txt_feat_len):
        super(TxtNetNew, self).__init__()
        self.fc1 = nn.Linear(txt_feat_len, 4096)
        self.fc2 = nn.Linear(4096, code_len)
        self.alpha = 1.0

    def set_alpha(self, epoch):
        # Anneal the tanh sharpness with the epoch count.
        self.alpha = math.pow(1.0 * epoch + 1.0, 0.5)

    def forward(self, input_0):
        graph_inputs = [self.fc1.weight, self.fc1.bias, input_0,
            self.fc2.weight, self.fc2.bias]
        output = call(graph_inputs)
        return output[0], output[1], output[2]
|
zzs1994/DJsRH
|
TxtNet
| false
| 16,841
|
[
"MIT"
] | 53
|
6041c2df810723dd0052e2e5b7c6bd33033f0f21
|
https://github.com/zzs1994/DJsRH/tree/6041c2df810723dd0052e2e5b7c6bd33033f0f21
|
FeatureFusion
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
    """ Multilayer perceptron."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden and output widths default to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        """
        # fc1 -> activation -> dropout -> fc2 -> dropout.
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(CrossAttention, self).__init__()
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
B, q_N, C = q.shape
kv_N = kv.shape[1]
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.
num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
else:
q = q + q_ape if q_ape is not None else q
q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = kv + k_ape if k_ape is not None else kv
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, q_N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SelfAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(SelfAttention, self).__init__()
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
B, N, C = x.shape
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.
num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
q = x + q_ape if q_ape is not None else x
q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = x + k_ape if k_ape is not None else x
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class FeatureFusion(nn.Module):
    """Transformer-style fusion block for two token streams z and x.

    Each stream runs residual pre-norm self-attention, then cross-attends to
    the other stream, then passes through its own residual MLP.

    NOTE(review): the ``drop_path=nn.Identity()`` default is evaluated once
    at class-definition time, so instances using the default share one
    Identity module; Identity is stateless, so this appears harmless.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
        False):
        super(FeatureFusion, self).__init__()
        # Pre-norm layers for the two self-attention branches.
        self.z_norm1 = norm_layer(dim)
        self.x_norm1 = norm_layer(dim)
        self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        # Separate norms for the query and key/value sides of each cross block.
        self.z_norm2_1 = norm_layer(dim)
        self.z_norm2_2 = norm_layer(dim)
        self.x_norm2_1 = norm_layer(dim)
        self.x_norm2_2 = norm_layer(dim)
        self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
            qk_scale, attn_drop, drop, attn_pos_encoding_only)
        self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
            qk_scale, attn_drop, drop, attn_pos_encoding_only)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.z_norm3 = norm_layer(dim)
        self.x_norm3 = norm_layer(dim)
        self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.drop_path = drop_path

    def forward(self, z, x, z_self_attn_pos, x_self_attn_pos,
        z_x_cross_attn_pos, x_z_cross_attn_pos):
        # Residual pre-norm self-attention on each stream.
        z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None,
            z_self_attn_pos))
        x = x + self.drop_path(self.x_self_attn(self.x_norm1(x), None, None,
            x_self_attn_pos))
        # Cross attention: z queries x, then x queries the already-updated z.
        z = z + self.drop_path(self.z_x_cross_attention(self.z_norm2_1(z),
            self.x_norm2_1(x), None, None, z_x_cross_attn_pos))
        x = x + self.drop_path(self.x_z_cross_attention(self.x_norm2_2(x),
            self.z_norm2_2(z), None, None, x_z_cross_attn_pos))
        # Residual MLP on each stream.
        z = z + self.drop_path(self.z_mlp(self.z_norm3(z)))
        x = x + self.drop_path(self.x_mlp(self.x_norm3(x)))
        return z, x
def get_inputs():
    """Sample forward inputs for FeatureFusion: z, x, and four
    attention-bias tensors."""
    z, x = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    biases = [torch.rand(4, 4, 4, 4) for _ in range(4)]
    return [z, x] + biases
def get_init_inputs():
    """Sample constructor arguments for FeatureFusion: (args, kwargs)."""
    return [[], dict(dim=4, num_heads=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# TorchInductor-generated kernel: per-row LayerNorm statistics over the
# last dimension (size 4). Writes the row mean and rsqrt(var + 1e-5).
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Load all 4 elements of the row explicitly (dim is unrolled).
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    # Biased variance, then rsqrt(var + eps).
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
# TorchInductor-generated kernel: applies LayerNorm affine transform,
# out = (x - mean) * rstd * weight + bias, broadcasting the per-row stats
# (in_ptr1/in_ptr2) and the per-feature weight/bias (in_ptr3/in_ptr4).
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
# TorchInductor-generated kernel: materializes a permuted copy of a small
# tensor (2D launch grid) so downstream matmuls see a contiguous layout.
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    # Gather with transposed strides, scatter contiguously.
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
# TorchInductor-generated kernel: softmax reduction pass over rows of
# length 4 for scores (logits * 1.0 + positional bias). Writes the per-row
# max and the sum of exp(score - max) for the normalization pass.
@triton.jit
def triton_poi_fused__softmax_add_mul_3(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Attention scale folded to the constant 1.0 for this configuration.
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp5 * tmp1
    tmp8 = tmp6 + tmp7
    tmp9 = triton_helpers.maximum(tmp4, tmp8)
    tmp11 = tmp10 * tmp1
    tmp13 = tmp11 + tmp12
    tmp14 = triton_helpers.maximum(tmp9, tmp13)
    tmp16 = tmp15 * tmp1
    tmp18 = tmp16 + tmp17
    tmp19 = triton_helpers.maximum(tmp14, tmp18)
    # Numerically stable exp-sum relative to the row max.
    tmp20 = tmp4 - tmp19
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp8 - tmp19
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp19
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp24 + tmp26
    tmp28 = tmp18 - tmp19
    tmp29 = tl_math.exp(tmp28)
    tmp30 = tmp27 + tmp29
    tl.store(out_ptr0 + x0, tmp19, xmask)
    tl.store(out_ptr1 + x0, tmp30, xmask)
# TorchInductor-generated kernel: softmax normalization pass. In place:
# attn = exp(score * 1.0 + bias - row_max) / row_sum, using the stats
# produced by triton_poi_fused__softmax_add_mul_3.
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + x2, xmask)
    tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 / tmp8
    tl.store(in_out_ptr0 + x2, tmp9, xmask)
# TorchInductor-generated kernel: fused residual add + LayerNorm statistics.
# Each row value is residual + (branch_out + bias); writes the row mean and
# (biased) variance for the normalization kernels that follow.
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    # Per-element: residual + (value + per-feature bias), then row mean.
    tmp4 = tmp1 + tmp3
    tmp5 = tmp0 + tmp4
    tmp10 = tmp7 + tmp9
    tmp11 = tmp6 + tmp10
    tmp12 = tmp5 + tmp11
    tmp17 = tmp14 + tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp12 + tmp18
    tmp24 = tmp21 + tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
# TorchInductor-generated kernel: normalizes the fused residual sum with
# rsqrt(var + 1e-5) and writes TWO affine outputs (two weight/bias pairs),
# feeding two different downstream LayerNorm consumers from one pass.
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr1,
    out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
    # value = residual + (branch_out + bias), normalized with row stats.
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = tmp11 * tmp16
    tmp19 = tmp17 + tmp18
    tl.store(out_ptr1 + x2, tmp15, xmask)
    tl.store(out_ptr2 + x2, tmp19, xmask)
# TorchInductor-generated kernel: same fused residual-add + LayerNorm
# normalization as kernel 6, but with a single affine output.
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
# TorchInductor-generated kernel: fused double residual accumulation with
# two per-feature biases: in_out = (a + (b + bias_b)) + (in_out + bias_c).
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_out_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 + tmp7
    tl.store(in_out_ptr0 + x2, tmp8, xmask)
# TorchInductor-generated kernel: applies precomputed LayerNorm stats
# (mean in in_ptr1, rstd in in_ptr2) and writes TWO affine outputs from
# two weight/bias pairs in a single pass.
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp4 * tmp9
    tmp12 = tmp10 + tmp11
    tl.store(out_ptr0 + x2, tmp8, xmask)
    tl.store(out_ptr1 + x2, tmp12, xmask)
# TorchInductor-generated kernel: exact (erf-based) GELU,
# 0.5 * x * (1 + erf(x / sqrt(2))).
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 0.7071... = 1/sqrt(2).
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
# TorchInductor-generated kernel: in-place residual add with a broadcast
# per-feature bias: in_out = skip + (in_out + bias).
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Inductor-generated forward for FeatureFusionNew.

    args is a list of 50 tensors (norm weights/biases, q/k/v/proj weights,
    MLP weights and the three pairs of inputs); it is cleared in place.
    Returns a tuple whose first two entries are the two fused feature
    streams; the remaining entries are intermediates kept alive,
    presumably for the autograd backward pass — do not reorder.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25, primals_26, primals_27,
        primals_28, primals_29, primals_30, primals_31, primals_32,
        primals_33, primals_34, primals_35, primals_36, primals_37,
        primals_38, primals_39, primals_40, primals_41, primals_42,
        primals_43, primals_44, primals_45, primals_46, primals_47,
        primals_48, primals_49, primals_50) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4, 4), (4, 1))
    assert_size_stride(primals_15, (4, 4), (4, 1))
    assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_17, (4, 4), (4, 1))
    assert_size_stride(primals_18, (4,), (1,))
    assert_size_stride(primals_19, (4,), (1,))
    assert_size_stride(primals_20, (4,), (1,))
    assert_size_stride(primals_21, (4,), (1,))
    assert_size_stride(primals_22, (4,), (1,))
    assert_size_stride(primals_23, (4, 4), (4, 1))
    assert_size_stride(primals_24, (4, 4), (4, 1))
    assert_size_stride(primals_25, (4, 4), (4, 1))
    assert_size_stride(primals_26, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_27, (4, 4), (4, 1))
    assert_size_stride(primals_28, (4,), (1,))
    assert_size_stride(primals_29, (4,), (1,))
    assert_size_stride(primals_30, (4,), (1,))
    assert_size_stride(primals_31, (4,), (1,))
    assert_size_stride(primals_32, (4,), (1,))
    assert_size_stride(primals_33, (4, 4), (4, 1))
    assert_size_stride(primals_34, (4, 4), (4, 1))
    assert_size_stride(primals_35, (4, 4), (4, 1))
    assert_size_stride(primals_36, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_37, (4, 4), (4, 1))
    assert_size_stride(primals_38, (4,), (1,))
    assert_size_stride(primals_39, (4,), (1,))
    assert_size_stride(primals_40, (4,), (1,))
    assert_size_stride(primals_41, (16, 4), (4, 1))
    assert_size_stride(primals_42, (16,), (1,))
    assert_size_stride(primals_43, (4, 16), (16, 1))
    assert_size_stride(primals_44, (4,), (1,))
    assert_size_stride(primals_45, (4,), (1,))
    assert_size_stride(primals_46, (4,), (1,))
    assert_size_stride(primals_47, (16, 4), (4, 1))
    assert_size_stride(primals_48, (16,), (1,))
    assert_size_stride(primals_49, (4, 16), (16, 1))
    assert_size_stride(primals_50, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Stream 1 (primals_3): pre-norm + self-attention.
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
            buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_1
        del primals_2
        buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
        buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf3, buf6, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf3
        triton_poi_fused_clone_2[grid(16, 4)](buf4, buf7, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8)
        buf9 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf4
        buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_mul_3[grid(64)](buf8, primals_7, buf9,
            buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf8
        triton_poi_fused__softmax_add_mul_4[grid(256)](buf11, primals_7,
            buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
        buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf9
        triton_poi_fused_clone_2[grid(16, 4)](buf5, buf12, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)
        del buf5
        extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
        buf14 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
        del buf10
        triton_poi_fused_clone_2[grid(16, 4)](buf13, buf14, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
        del buf13
        extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
        # Stream 2 (primals_12): pre-norm + self-attention (same structure).
        buf16 = buf1
        del buf1
        buf17 = buf0
        del buf0
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_12, buf16,
            buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(64)](primals_12, buf16,
            buf17, primals_10, primals_11, buf18, 64, XBLOCK=64, num_warps=
            1, num_stages=1)
        del primals_10
        del primals_11
        buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
        buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf20)
        buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf21)
        buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf19, buf22, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf23 = reinterpret_tensor(buf19, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf19
        triton_poi_fused_clone_2[grid(16, 4)](buf20, buf23, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf24 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf23, (16, 1, 4), (4, 0, 1), 0), out=buf24)
        buf25 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf20
        buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_mul_3[grid(64)](buf24, primals_16,
            buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf27 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf24
        triton_poi_fused__softmax_add_mul_4[grid(256)](buf27, primals_16,
            buf25, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_16
        buf28 = reinterpret_tensor(buf26, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf26
        triton_poi_fused_clone_2[grid(16, 4)](buf21, buf28, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf29 = reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 1), 0)
        del buf21
        extern_kernels.bmm(reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf28, (16, 4, 1), (4, 1, 0), 0), out=buf29)
        buf30 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
        del buf25
        triton_poi_fused_clone_2[grid(16, 4)](buf29, buf30, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf31 = reinterpret_tensor(buf29, (16, 4), (4, 1), 0)
        del buf29
        extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf31)
        # Residual add + the layer norms that feed the cross-attention stage.
        buf32 = buf17
        del buf17
        buf33 = buf16
        del buf16
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf15,
            primals_9, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf34 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf35 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_12,
            buf31, primals_18, buf34, buf35, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf55 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_12,
            buf31, primals_18, buf34, buf35, primals_21, primals_22,
            primals_29, primals_30, buf39, buf55, 64, XBLOCK=64, num_warps=
            1, num_stages=1)
        del buf34
        del buf35
        del primals_22
        del primals_30
        buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_3, buf15,
            primals_9, buf32, buf33, primals_19, primals_20, buf37, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_20
        # Cross-attention: stream-1 queries (buf37) against stream-2 k/v (buf39).
        buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf37, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_23, (4, 4), (1, 4), 0), out=buf38)
        buf40 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf40)
        buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf41)
        buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf38, buf42, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf43 = reinterpret_tensor(buf38, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf38
        triton_poi_fused_clone_2[grid(16, 4)](buf40, buf43, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf44 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf42, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf43, (16, 1, 4), (4, 0, 1), 0), out=buf44)
        buf45 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf40
        buf46 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_mul_3[grid(64)](buf44, primals_26,
            buf45, buf46, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf47 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf44
        triton_poi_fused__softmax_add_mul_4[grid(256)](buf47, primals_26,
            buf45, buf46, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_26
        buf48 = reinterpret_tensor(buf46, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf46
        triton_poi_fused_clone_2[grid(16, 4)](buf41, buf48, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf49 = reinterpret_tensor(buf41, (16, 4, 1), (4, 1, 1), 0)
        del buf41
        extern_kernels.bmm(reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf48, (16, 4, 1), (4, 1, 0), 0), out=buf49)
        buf50 = reinterpret_tensor(buf45, (4, 4, 4), (16, 4, 1), 0)
        del buf45
        triton_poi_fused_clone_2[grid(16, 4)](buf49, buf50, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf51 = reinterpret_tensor(buf49, (16, 4), (4, 1), 0)
        del buf49
        extern_kernels.mm(reinterpret_tensor(buf50, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf51)
        buf52 = reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0)
        del buf51
        triton_poi_fused_add_8[grid(64)](buf52, primals_3, buf15, primals_9,
            primals_28, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_28
        buf53 = buf33
        del buf33
        buf54 = buf32
        del buf32
        triton_poi_fused_native_layer_norm_0[grid(16)](buf52, buf53, buf54,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        # Cross-attention in the opposite direction (stream-2 queries buf55).
        buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf55, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_33, (4, 4), (1, 4), 0), out=buf56)
        buf57 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf71 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_9[grid(64)](buf52, buf53, buf54,
            primals_31, primals_32, primals_39, primals_40, buf57, buf71,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_32
        del primals_40
        buf58 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf57, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_34, (4, 4), (1, 4), 0), out=buf58)
        buf59 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf57, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_35, (4, 4), (1, 4), 0), out=buf59)
        buf60 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf56, buf60, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf61 = reinterpret_tensor(buf56, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf56
        triton_poi_fused_clone_2[grid(16, 4)](buf58, buf61, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf62 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf60, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf61, (16, 1, 4), (4, 0, 1), 0), out=buf62)
        buf63 = reinterpret_tensor(buf58, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf58
        buf64 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_mul_3[grid(64)](buf62, primals_36,
            buf63, buf64, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf65 = reinterpret_tensor(buf62, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf62
        triton_poi_fused__softmax_add_mul_4[grid(256)](buf65, primals_36,
            buf63, buf64, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_36
        buf66 = reinterpret_tensor(buf64, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf64
        triton_poi_fused_clone_2[grid(16, 4)](buf59, buf66, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf67 = reinterpret_tensor(buf59, (16, 4, 1), (4, 1, 1), 0)
        del buf59
        extern_kernels.bmm(reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf66, (16, 4, 1), (4, 1, 0), 0), out=buf67)
        buf68 = reinterpret_tensor(buf63, (4, 4, 4), (16, 4, 1), 0)
        del buf63
        triton_poi_fused_clone_2[grid(16, 4)](buf67, buf68, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf69 = reinterpret_tensor(buf67, (16, 4), (4, 1), 0)
        del buf67
        extern_kernels.mm(reinterpret_tensor(buf68, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_37, (4, 4), (1, 4), 0), out=buf69)
        buf70 = reinterpret_tensor(buf69, (4, 4, 4), (16, 4, 1), 0)
        del buf69
        triton_poi_fused_add_8[grid(64)](buf70, primals_12, buf31,
            primals_18, primals_38, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_38
        # MLP (fc1 -> GELU -> fc2) with residual add, one per stream.
        buf72 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_42, reinterpret_tensor(buf71, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_41, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf72)
        del primals_42
        buf73 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_gelu_10[grid(256)](buf72, buf73, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf74 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf73, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_43, (16, 4), (1, 16), 0), out=buf74)
        buf75 = reinterpret_tensor(buf74, (4, 4, 4), (16, 4, 1), 0)
        del buf74
        triton_poi_fused_add_11[grid(64)](buf75, buf52, primals_44, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_44
        buf76 = buf54
        del buf54
        buf77 = buf53
        del buf53
        triton_poi_fused_native_layer_norm_0[grid(16)](buf70, buf76, buf77,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf78 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(64)](buf70, buf76, buf77,
            primals_45, primals_46, buf78, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf76
        del buf77
        del primals_46
        buf79 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_48, reinterpret_tensor(buf78, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_47, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf79)
        del primals_48
        buf80 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_gelu_10[grid(256)](buf79, buf80, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf81 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf80, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_49, (16, 4), (1, 16), 0), out=buf81)
        buf82 = reinterpret_tensor(buf81, (4, 4, 4), (16, 4, 1), 0)
        del buf81
        triton_poi_fused_add_11[grid(64)](buf82, buf70, primals_50, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_50
    # buf75/buf82 are the module outputs; everything else is retained state.
    return (buf75, buf82, primals_3, primals_9, primals_12, primals_18,
        primals_19, primals_21, primals_29, primals_31, primals_39,
        primals_45, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11,
        reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15,
        reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf27,
        reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf31,
        reinterpret_tensor(buf37, (16, 4), (4, 1), 0), reinterpret_tensor(
        buf39, (16, 4), (4, 1), 0), buf47, reinterpret_tensor(buf50, (16, 4
        ), (4, 1), 0), buf52, reinterpret_tensor(buf55, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf57, (16, 4), (4, 1), 0), buf65,
        reinterpret_tensor(buf68, (16, 4), (4, 1), 0), buf70,
        reinterpret_tensor(buf71, (16, 4), (4, 1), 0), buf72,
        reinterpret_tensor(buf73, (16, 16), (16, 1), 0), reinterpret_tensor
        (buf78, (16, 4), (4, 1), 0), buf79, reinterpret_tensor(buf80, (16,
        16), (16, 1), 0), primals_49, primals_47, primals_43, primals_41,
        primals_37, reinterpret_tensor(buf66, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf60, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf61, (16, 4, 1), (4, 1, 4), 0), primals_35,
        primals_34, primals_33, primals_27, reinterpret_tensor(buf48, (16,
        1, 4), (4, 1, 1), 0), reinterpret_tensor(buf42, (16, 1, 4), (4, 1,
        1), 0), reinterpret_tensor(buf43, (16, 4, 1), (4, 1, 4), 0),
        primals_25, primals_24, primals_23, primals_17, reinterpret_tensor(
        buf28, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 1,
        4), (4, 1, 1), 0), reinterpret_tensor(buf23, (16, 4, 1), (4, 1, 4),
        0), primals_15, primals_14, primals_13, primals_8,
        reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0), primals_6,
        primals_5, primals_4)
class Mlp(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear,
    with dropout after each linear layer."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Missing sizes default to the input width.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply the MLP token-wise.

        Args:
            x (torch.Tensor): (B, L, C) input tensor.
        Returns:
            torch.Tensor: (B, L, C) output tensor.
        """
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class CrossAttention(nn.Module):
    """Multi-head cross attention from a query sequence onto a key/value sequence.

    Optionally adds absolute positional encodings to q/k before projection and
    an untied positional bias to the attention logits.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # Positional info arrives only via attn_pos, so k and v can share
            # one fused projection.
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        """
        Args:
            q (torch.Tensor): (B, L_q, C)
            kv (torch.Tensor): (B, L_kv, C)
            q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
        Returns:
            torch.Tensor: (B, L_q, C)
        """
        B, q_N, C = q.shape
        kv_N = kv.shape[1]
        heads = self.num_heads
        d_head = C // heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, heads, d_head).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, heads, d_head).permute(2,
                0, 3, 1, 4)
            k, v = kv[0], kv[1]
        else:
            if q_ape is not None:
                q = q + q_ape
            q = self.q(q).reshape(B, q_N, heads, d_head).permute(0, 2, 1, 3)
            k = kv if k_ape is None else kv + k_ape
            k = self.k(k).reshape(B, -1, heads, d_head).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, heads, d_head).permute(0, 2, 1, 3)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            attn = attn + attn_pos
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(B, q_N, C)
        return self.proj_drop(self.proj(out))
class SelfAttention(nn.Module):
    """Multi-head self attention with optional absolute and untied positional
    encodings."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # Positional info arrives only via attn_pos, so a single fused
            # q/k/v projection suffices.
            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, x, q_ape, k_ape, attn_pos):
        """
        Args:
            x (torch.Tensor): (B, L, C)
            q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
        Returns:
            torch.Tensor: (B, L, C)
        """
        B, N, C = x.shape
        heads = self.num_heads
        d_head = C // heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, heads, d_head).permute(2, 0,
                3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            q = x if q_ape is None else x + q_ape
            q = self.q(q).reshape(B, N, heads, d_head).permute(0, 2, 1, 3)
            k = x if k_ape is None else x + k_ape
            k = self.k(k).reshape(B, -1, heads, d_head).permute(0, 2, 1, 3)
            v = self.v(x).reshape(B, -1, heads, d_head).permute(0, 2, 1, 3)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            attn = attn + attn_pos
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
class FeatureFusionNew(nn.Module):
    """Two-stream fusion block (z and x): per-stream self-attention, then
    bidirectional cross-attention, then per-stream MLPs, all pre-normed.
    forward delegates to the Inductor-generated `call` graph function.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
        False):
        super(FeatureFusionNew, self).__init__()
        self.z_norm1 = norm_layer(dim)
        self.x_norm1 = norm_layer(dim)
        self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop, attn_pos_encoding_only)
        self.z_norm2_1 = norm_layer(dim)
        self.z_norm2_2 = norm_layer(dim)
        self.x_norm2_1 = norm_layer(dim)
        self.x_norm2_2 = norm_layer(dim)
        self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
            qk_scale, attn_drop, drop, attn_pos_encoding_only)
        self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
            qk_scale, attn_drop, drop, attn_pos_encoding_only)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.z_norm3 = norm_layer(dim)
        self.x_norm3 = norm_layer(dim)
        self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.drop_path = drop_path

    def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
        """Run the fused kernel graph.

        input_0/input_1 are the two (B, L, C) feature streams; input_2..input_5
        are presumably the attention position-bias tensors consumed by the
        softmax kernels — TODO confirm against the non-fused FeatureFusion.
        Returns the two fused streams (output[0], output[1]).

        NOTE(review): the primals_* numbering below is consumed positionally
        by the generated `call`; do not reorder these assignments' targets.
        """
        primals_1 = self.z_norm1.weight
        primals_2 = self.z_norm1.bias
        primals_9 = self.x_norm1.weight
        primals_10 = self.x_norm1.bias
        primals_4 = self.z_self_attn.q.weight
        primals_5 = self.z_self_attn.k.weight
        primals_6 = self.z_self_attn.v.weight
        primals_8 = self.z_self_attn.proj.weight
        primals_11 = self.z_self_attn.proj.bias
        primals_13 = self.x_self_attn.q.weight
        primals_14 = self.x_self_attn.k.weight
        primals_15 = self.x_self_attn.v.weight
        primals_17 = self.x_self_attn.proj.weight
        primals_18 = self.x_self_attn.proj.bias
        primals_19 = self.z_norm2_1.weight
        primals_20 = self.z_norm2_1.bias
        primals_21 = self.z_norm2_2.weight
        primals_22 = self.z_norm2_2.bias
        primals_28 = self.x_norm2_1.weight
        primals_29 = self.x_norm2_1.bias
        primals_30 = self.x_norm2_2.weight
        primals_31 = self.x_norm2_2.bias
        primals_23 = self.z_x_cross_attention.q.weight
        primals_24 = self.z_x_cross_attention.k.weight
        primals_25 = self.z_x_cross_attention.v.weight
        primals_27 = self.z_x_cross_attention.proj.weight
        primals_32 = self.z_x_cross_attention.proj.bias
        primals_33 = self.x_z_cross_attention.q.weight
        primals_34 = self.x_z_cross_attention.k.weight
        primals_35 = self.x_z_cross_attention.v.weight
        primals_37 = self.x_z_cross_attention.proj.weight
        primals_38 = self.x_z_cross_attention.proj.bias
        primals_39 = self.z_norm3.weight
        primals_40 = self.z_norm3.bias
        primals_44 = self.x_norm3.weight
        primals_45 = self.x_norm3.bias
        primals_41 = self.z_mlp.fc1.weight
        primals_42 = self.z_mlp.fc1.bias
        primals_43 = self.z_mlp.fc2.weight
        primals_46 = self.z_mlp.fc2.bias
        primals_47 = self.x_mlp.fc1.weight
        primals_48 = self.x_mlp.fc1.bias
        primals_49 = self.x_mlp.fc2.weight
        primals_50 = self.x_mlp.fc2.bias
        primals_3 = input_0
        primals_12 = input_1
        primals_7 = input_2
        primals_16 = input_3
        primals_26 = input_4
        primals_36 = input_5
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41, primals_42, primals_43, primals_44,
            primals_45, primals_46, primals_47, primals_48, primals_49,
            primals_50])
        return output[0], output[1]
|
zhangzhengde0225/SwinTrack
|
FeatureFusion
| false
| 16,842
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
TargetQueryDecoderLayer
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
    """Token-wise feed-forward network: fc1 -> activation -> dropout ->
    fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Fall back to the input width when sizes are not given.
        if out_features is None or not out_features:
            out_features = in_features
        if hidden_features is None or not hidden_features:
            hidden_features = in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        """
        out = self.fc1(x)
        out = self.drop(self.act(out))
        out = self.fc2(out)
        return self.drop(out)
class CrossAttention(nn.Module):
    """Cross attention: queries from one sequence attend over another.

    Absolute positional encodings (q_ape/k_ape) may be added before
    projection; an untied bias (attn_pos) may be added to the logits.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # k/v share one fused projection in this mode.
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        """
        Args:
            q (torch.Tensor): (B, L_q, C)
            kv (torch.Tensor): (B, L_kv, C)
            q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
        Returns:
            torch.Tensor: (B, L_q, C)
        """
        B, q_N, C = q.shape
        kv_N = kv.shape[1]
        nh, dh = self.num_heads, C // self.num_heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, nh, dh).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, nh, dh).permute(2, 0, 3,
                1, 4)
            k, v = kv[0], kv[1]
        else:
            if q_ape is not None:
                q = q + q_ape
            q = self.q(q).reshape(B, q_N, nh, dh).permute(0, 2, 1, 3)
            k = kv if k_ape is None else kv + k_ape
            k = self.k(k).reshape(B, -1, nh, dh).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, nh, dh).permute(0, 2, 1, 3)
        logits = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            logits = logits + attn_pos
        weights = self.attn_drop(logits.softmax(dim=-1))
        fused = (weights @ v).transpose(1, 2).reshape(B, q_N, C)
        return self.proj_drop(self.proj(fused))
class SelfAttention(nn.Module):
    """Self attention over a single sequence, with optional absolute
    (q_ape/k_ape) and untied (attn_pos) positional encodings."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # One fused projection when positional info only enters via attn_pos.
            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, x, q_ape, k_ape, attn_pos):
        """
        Args:
            x (torch.Tensor): (B, L, C)
            q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
        Returns:
            torch.Tensor: (B, L, C)
        """
        B, N, C = x.shape
        nh, dh = self.num_heads, C // self.num_heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, nh, dh).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            q = x if q_ape is None else x + q_ape
            q = self.q(q).reshape(B, N, nh, dh).permute(0, 2, 1, 3)
            k = x if k_ape is None else x + k_ape
            k = self.k(k).reshape(B, -1, nh, dh).permute(0, 2, 1, 3)
            v = self.v(x).reshape(B, -1, nh, dh).permute(0, 2, 1, 3)
        logits = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            logits = logits + attn_pos
        weights = self.attn_drop(logits.softmax(dim=-1))
        fused = (weights @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(fused))
class TargetQueryDecoderLayer(nn.Module):
    """Pre-norm transformer decoder layer: target-query self-attention,
    query->memory cross-attention, then an MLP, each wrapped in a residual
    connection (optionally gated by a drop-path module).

    Args:
        dim (int): embedding width C
        num_heads (int): number of attention heads
        mlp_ratio (float): hidden-width multiplier for the MLP
        qkv_bias (bool): add bias to the q/k/v projections
        qk_scale (float | None): explicit attention scale; default 1/sqrt(head_dim)
        drop (float): dropout rate for projections and the MLP
        attn_drop (float): dropout rate on the attention weights
        drop_path (nn.Module | None): stochastic-depth module; None -> nn.Identity()
        act_layer, norm_layer: activation / normalization constructors
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=None,
        act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super(TargetQueryDecoderLayer, self).__init__()
        self.norm_1 = norm_layer(dim)
        self.self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop)
        self.norm_2_query = norm_layer(dim)
        self.norm_2_memory = norm_layer(dim)
        self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop)
        self.norm_3 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=
            act_layer, drop=drop)
        # Bug fix: the previous signature used the mutable default
        # `drop_path=nn.Identity()`, a single module instance shared by every
        # layer constructed with the default. nn.Identity is stateless so the
        # sharing was benign, but each layer now gets its own instance
        # (standard mutable-default-argument fix; behavior unchanged).
        self.drop_path = drop_path if drop_path is not None else nn.Identity()

    def forward(self, query, memory, query_pos, memory_pos):
        """
        Args:
            query (torch.Tensor): (B, num_queries, C)
            memory (torch.Tensor): (B, L, C)
            query_pos (torch.Tensor): (1 or B, num_queries, C)
            memory_pos (torch.Tensor): (1 or B, L, C)
        Returns:
            torch.Tensor: (B, num_queries, C)
        """
        # Self-attention among the queries; query_pos serves as APE for q and k.
        query = query + self.drop_path(self.self_attn(self.norm_1(query),
            query_pos, query_pos, None))
        # Cross-attention: queries attend to the separately-normalized memory.
        query = query + self.drop_path(self.cross_attn(self.norm_2_query(
            query), self.norm_2_memory(memory), query_pos, memory_pos, None))
        # Position-wise feed-forward.
        query = query + self.drop_path(self.mlp(self.norm_3(query)))
        return query
def get_inputs():
    """Sample forward inputs: one (4, 4, 4) tensor plus three (4, 4) tensors."""
    shapes = [(4, 4, 4), (4, 4), (4, 4), (4, 4)]
    return [torch.rand(*shape) for shape in shapes]
def get_init_inputs():
    """Constructor arguments: no positionals, dim=4 with four heads."""
    init_kwargs = {'dim': 4, 'num_heads': 4}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
# Module-level aliases for TorchInductor runtime helpers (tensor layout
# assertions, raw CUDA allocation, and zero-copy stride reinterpretation).
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# LayerNorm statistics over the last dimension (hard-coded size 4):
# out_ptr0 <- per-row mean, out_ptr1 <- rsqrt(variance + 1e-5).
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # Mean of the four elements of the row.
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    # Biased variance (sum of squared deviations / 4).
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
# Applies the LayerNorm affine transform using precomputed mean (in_ptr1) and
# rstd (in_ptr2): out_ptr0 <- (x - mean) * rstd * weight + bias, and also
# stores that result plus a second broadcast term (in_ptr5) into out_ptr1.
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x4, tmp8, xmask)
    tl.store(out_ptr1 + x4, tmp10, xmask)
# Strided copy ("clone"): gathers in_ptr0 at (y0 + 4*x2 + 16*y1) and writes it
# contiguously at (x2 + 4*y3), i.e. a small transpose-style layout change.
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
# Softmax, stage 1 of 2 over rows of length 4: computes exp(x - row_max)
# (multiplications by 1.0 are the fused-in attention scale of 1.0).
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    # Subtract the row max before exponentiating for numerical stability.
    tmp14 = tmp2 - tmp13
    tmp15 = tmp14 * tmp1
    tmp16 = tl_math.exp(tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)
# Softmax, stage 2 of 2: divides each exponentiated element by its row sum.
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
# Fused residual add + LayerNorm statistics: forms s_i = in_ptr0 + (in_ptr1 +
# bias) element-wise over rows of 4, then writes the row mean to out_ptr0 and
# the (biased) row variance to out_ptr1 (note: variance, not rstd).
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    # Per-element residual sums s0..s3 (tmp5, tmp11, tmp18, tmp25).
    tmp4 = tmp1 + tmp3
    tmp5 = tmp0 + tmp4
    tmp10 = tmp7 + tmp9
    tmp11 = tmp6 + tmp10
    tmp12 = tmp5 + tmp11
    tmp17 = tmp14 + tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp12 + tmp18
    tmp24 = tmp21 + tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
# Same LayerNorm statistics as kernel 0 (mean + rsqrt(var + 1e-5) over rows
# of 4), but sized for a 4-row input (xnumel = 4).
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
# LayerNorm affine using precomputed mean/rstd (like kernel 1) for a 4x4
# input: out_ptr0 <- normalized result, out_ptr1 <- normalized result plus an
# element-wise addend (in_ptr5).
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x2, xmask)
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x2, tmp8, xmask)
    tl.store(out_ptr1 + x2, tmp10, xmask)
# Fused residual add + full LayerNorm (mean in_ptr3, variance in_ptr4, eps
# added and rsqrt taken here) + affine (in_ptr5 weight, in_ptr6 bias) + a
# final broadcast add of in_ptr7.
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x4 = xindex // 4
    x5 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr7 + x5, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = tmp15 + tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)
# Degenerate softmax over rows of length 1: x - max(x) = 0, exp(0) = 1, and
# 1/1 = 1, so every masked element is overwritten with 1.0 in place.
@triton.jit
def triton_poi_fused__softmax_9(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tmp2 - tmp2
    tmp4 = tmp3 * tmp1
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5 / tmp5
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
# In-place accumulation of two residual paths with their biases:
# in_out_ptr0 <- (in_ptr0 + in_ptr1 + bias in_ptr2) + (in_out_ptr0 + bias in_ptr3).
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_out_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 + tmp7
    tl.store(in_out_ptr0 + x2, tmp8, xmask)
# LayerNorm affine only (no extra residual add): with precomputed mean
# (in_ptr1) and rstd (in_ptr2), out_ptr0 <- (x - mean) * rstd * weight + bias.
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
# Exact (erf-based) GELU: out <- 0.5 * x * (1 + erf(x / sqrt(2))).
@triton.jit
def triton_poi_fused_gelu_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 0.7071067811865476 == 1 / sqrt(2)
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
# In-place residual add with a broadcast bias:
# in_out_ptr0 <- in_ptr0 + (in_out_ptr0 + in_ptr1[bias per channel]).
@triton.jit
def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """TorchInductor-generated fused forward pass for TargetQueryDecoderLayerNew.

    `args` carries the 26 parameter/input tensors (the list is cleared in
    place). Returns a tuple whose first element, buf39, is the layer output
    reinterpreted as (4, 4, 4); the remaining entries are weights and
    intermediate buffers — presumably saved for the backward pass, per the
    usual inductor convention (TODO confirm against the generated backward).
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25, primals_26) = args
    args.clear()
    # Shape/stride guards for every input tensor.
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (4, 4), (4, 1))
    assert_size_stride(primals_15, (4, 4), (4, 1))
    assert_size_stride(primals_16, (4, 4), (4, 1))
    assert_size_stride(primals_17, (4, 4), (4, 1))
    assert_size_stride(primals_18, (4, 4), (4, 1))
    assert_size_stride(primals_19, (4, 4), (4, 1))
    assert_size_stride(primals_20, (4,), (1,))
    assert_size_stride(primals_21, (4,), (1,))
    assert_size_stride(primals_22, (4,), (1,))
    assert_size_stride(primals_23, (16, 4), (4, 1))
    assert_size_stride(primals_24, (16,), (1,))
    assert_size_stride(primals_25, (4, 16), (16, 1))
    assert_size_stride(primals_26, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- norm_1 + self-attention q/k/v projections ---
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf0,
            buf1, primals_1, primals_2, primals_4, buf2, buf3, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
        buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
        buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
        # --- self-attention: scores, softmax, weighted values, projection ---
        buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf4, buf7, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf4
        triton_poi_fused_clone_2[grid(16, 4)](buf5, buf8, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_3[grid(256)](buf9, buf10, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf11 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf9
        triton_poi_fused__softmax_4[grid(256)](buf10, buf11, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        buf12 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf5
        triton_poi_fused_clone_2[grid(16, 4)](buf6, buf12, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0)
        del buf6
        extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
        buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf13, buf14, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
        del buf13
        extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
        # --- residual + norms feeding the cross-attention block ---
        buf16 = buf1
        del buf1
        buf17 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf15,
            primals_9, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf19 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_native_layer_norm_6[grid(4)](primals_14, buf18,
            buf19, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_14,
            buf18, buf19, primals_12, primals_13, primals_16, buf20, buf23,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf18
        del buf19
        del primals_12
        del primals_13
        del primals_16
        buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_3, buf15,
            primals_9, buf16, buf17, primals_10, primals_11, primals_4,
            buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_11
        del primals_4
        # --- cross-attention projections, scores, softmax, output ---
        buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf21, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf22)
        buf24 = reinterpret_tensor(buf17, (4, 4), (4, 1), 0)
        del buf17
        extern_kernels.mm(buf23, reinterpret_tensor(primals_17, (4, 4), (1,
            4), 0), out=buf24)
        buf25 = reinterpret_tensor(buf16, (4, 4), (4, 1), 0)
        del buf16
        extern_kernels.mm(buf20, reinterpret_tensor(primals_18, (4, 4), (1,
            4), 0), out=buf25)
        buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf22, buf26, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf27 = reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0)
        del buf22
        extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf24, (16, 1, 1), (1, 1, 1), 0), out=buf27)
        buf28 = reinterpret_tensor(buf27, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf27
        triton_poi_fused__softmax_9[grid(64)](buf28, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf29 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf28, (16, 4, 1), (4, 1, 1),
            0), reinterpret_tensor(buf25, (16, 1, 1), (1, 1, 1), 0), out=buf29)
        buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_2[grid(16, 4)](buf29, buf30, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf31 = reinterpret_tensor(buf29, (16, 4), (4, 1), 0)
        del buf29
        extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf31)
        buf32 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0)
        del buf31
        triton_poi_fused_add_10[grid(64)](buf32, primals_3, buf15,
            primals_9, primals_20, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_20
        # --- norm_3 + MLP (fc1 -> GELU -> fc2) + final residual ---
        buf33 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf34 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_native_layer_norm_0[grid(16)](buf32, buf33, buf34,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf35 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_11[grid(64)](buf32, buf33, buf34,
            primals_21, primals_22, buf35, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf33
        del buf34
        del primals_22
        buf36 = reinterpret_tensor(buf10, (16, 16), (16, 1), 0)
        del buf10
        extern_kernels.addmm(primals_24, reinterpret_tensor(buf35, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_23, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf36)
        del primals_24
        buf37 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_gelu_12[grid(256)](buf36, buf37, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf37, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_25, (16, 4), (1, 16), 0), out=buf38)
        buf39 = reinterpret_tensor(buf38, (4, 4, 4), (16, 4, 1), 0)
        del buf38
        triton_poi_fused_add_13[grid(64)](buf39, buf32, primals_26, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_26
    return (buf39, primals_3, primals_9, primals_10, primals_14, primals_21,
        reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(
        buf2, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4),
        (4, 1), 0), buf15, buf20, reinterpret_tensor(buf21, (16, 4), (4, 1),
        0), buf23, buf28, reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
        buf32, reinterpret_tensor(buf35, (16, 4), (4, 1), 0), buf36,
        reinterpret_tensor(buf37, (16, 16), (16, 1), 0), primals_25,
        primals_23, primals_19, reinterpret_tensor(buf25, (16, 1, 1), (1, 1,
        4), 0), reinterpret_tensor(buf26, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf24, (16, 1, 1), (1, 4, 1), 0), primals_18,
        primals_17, primals_15, primals_8, reinterpret_tensor(buf12, (16, 1,
        4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1),
        0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0), primals_7,
        primals_6, primals_5)
class Mlp(nn.Module):
    """Two-layer perceptron: fc1 -> activation -> dropout -> fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Unspecified widths fall back to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        """
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class CrossAttention(nn.Module):
    """Multi-head cross-attention from a query sequence to a key/value
    (memory) sequence. Positional information enters either as absolute
    encodings added to q / k before projection, or as a precomputed untied
    bias on the attention logits (attn_pos_encoding_only mode)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default attention scale is 1/sqrt(head_dim).
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            # k and v share one fused projection in this mode.
            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        """
        Args:
            q (torch.Tensor): (B, L_q, C)
            kv (torch.Tensor): (B, L_kv, C)
            q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
        Returns:
            torch.Tensor: (B, L_q, C)
        """
        B, q_N, C = q.shape
        kv_N = kv.shape[1]
        head_dim = C // self.num_heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads, head_dim).permute(0, 2, 1, 3)
            pair = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, head_dim)
            k, v = pair.permute(2, 0, 3, 1, 4).unbind(0)
        else:
            q_in = q if q_ape is None else q + q_ape
            k_in = kv if k_ape is None else kv + k_ape
            q = self.q(q_in).reshape(B, q_N, self.num_heads, head_dim).permute(0, 2, 1, 3)
            k = self.k(k_in).reshape(B, -1, self.num_heads, head_dim).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, self.num_heads, head_dim).permute(0, 2, 1, 3)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            scores = scores + attn_pos
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(B, q_N, C)
        return self.proj_drop(self.proj(out))
class SelfAttention(nn.Module):
    """Multi-head self-attention. Positional information enters either as
    absolute encodings added to q/k before projection, or as a precomputed
    untied bias on the attention logits (attn_pos_encoding_only mode)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
        super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # One fused q/k/v projection when no APE is ever added.
            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def _split_heads(self, t, B, head_dim):
        # (B, L, C) -> (B, num_heads, L, head_dim)
        return t.reshape(B, -1, self.num_heads, head_dim).permute(0, 2, 1, 3)

    def forward(self, x, q_ape, k_ape, attn_pos):
        """
        Args:
            x (torch.Tensor): (B, L, C)
            q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
        Returns:
            torch.Tensor: (B, L, C)
        """
        B, N, C = x.shape
        head_dim = C // self.num_heads
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim)
            q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
        else:
            q = self._split_heads(self.q(x if q_ape is None else x + q_ape), B, head_dim)
            k = self._split_heads(self.k(x if k_ape is None else x + k_ape), B, head_dim)
            v = self._split_heads(self.v(x), B, head_dim)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        if attn_pos is not None:
            scores = scores + attn_pos
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
class TargetQueryDecoderLayerNew(nn.Module):
    """TargetQueryDecoderLayer variant whose forward dispatches to the
    TorchInductor-generated fused `call` graph instead of the eager modules.
    The submodules exist only to own the parameters; forward collects their
    weights/biases and the four input tensors into the positional layout
    `call` expects, and returns the first element of its result tuple.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=None,
        act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super(TargetQueryDecoderLayerNew, self).__init__()
        self.norm_1 = norm_layer(dim)
        self.self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop)
        self.norm_2_query = norm_layer(dim)
        self.norm_2_memory = norm_layer(dim)
        self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
            attn_drop, drop)
        self.norm_3 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=
            act_layer, drop=drop)
        # Bug fix: the previous default was the mutable `drop_path=nn.Identity()`
        # — one module instance shared by every layer built with the default.
        # nn.Identity is stateless so behavior is unchanged, but each layer now
        # gets a fresh instance (standard mutable-default-argument fix).
        self.drop_path = drop_path if drop_path is not None else nn.Identity()

    def forward(self, input_0, input_1, input_2, input_3):
        # Map module parameters and inputs onto the primals_* slots of the
        # generated `call` graph (ordering is fixed by the code generator).
        primals_1 = self.norm_1.weight
        primals_2 = self.norm_1.bias
        primals_4 = self.self_attn.q.weight
        primals_5 = self.self_attn.k.weight
        primals_6 = self.self_attn.v.weight
        primals_7 = self.self_attn.proj.weight
        primals_9 = self.self_attn.proj.bias
        primals_10 = self.norm_2_query.weight
        primals_11 = self.norm_2_query.bias
        primals_12 = self.norm_2_memory.weight
        primals_13 = self.norm_2_memory.bias
        primals_8 = self.cross_attn.q.weight
        primals_14 = self.cross_attn.k.weight
        primals_15 = self.cross_attn.v.weight
        primals_16 = self.cross_attn.proj.weight
        primals_20 = self.cross_attn.proj.bias
        primals_21 = self.norm_3.weight
        primals_22 = self.norm_3.bias
        primals_23 = self.mlp.fc1.weight
        primals_24 = self.mlp.fc1.bias
        primals_25 = self.mlp.fc2.weight
        primals_26 = self.mlp.fc2.bias
        primals_3 = input_0
        primals_17 = input_1
        primals_18 = input_2
        primals_19 = input_3
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26])
        # call() returns (layer_output, *saved_tensors); only the output matters.
        return output[0]
|
zhangzhengde0225/SwinTrack
|
TargetQueryDecoderLayer
| false
| 16,843
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
    """Small convolutional actor network: two valid convolutions with ReLU,
    a 2x2 max-pool, then two stride-2 convolutions, ending in a tanh so the
    output map `mu` is bounded to (-1, 1).

    Args:
        kernel_size (int): kernel size shared by all four conv layers.
    """

    def __init__(self, kernel_size):
        super(Actor, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=kernel_size)
        self.conv2 = nn.Conv2d(16, 4, kernel_size=kernel_size)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv1_ = nn.Conv2d(4, 16, kernel_size=kernel_size, stride=2)
        self.conv2_ = nn.Conv2d(16, 3, kernel_size=kernel_size, stride=2)

    def forward(self, inputs):
        """Return the tanh-bounded action map for an NCHW image batch."""
        x = F.relu(self.conv1(inputs))
        x = F.relu(self.conv2(x))
        x = self.pool1(x)
        x = F.relu(self.conv1_(x))
        # Fix: F.tanh is deprecated in favor of torch.tanh (identical result).
        mu = torch.tanh(self.conv2_(x))
        return mu
def get_inputs():
    """A single random NCHW image batch of shape (4, 3, 64, 64)."""
    batch = torch.rand(4, 3, 64, 64)
    return [batch]
def get_init_inputs():
    """Constructor arguments: no positionals, kernel_size=4."""
    init_kwargs = {'kernel_size': 4}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
# Aliases for inductor's runtime shape/stride guard and raw CUDA allocator.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU epilogue after conv1.
    # in_out_ptr0: conv output (4, 16, 61, 61); in_ptr0: (16,) channel bias.
    # 238144 = 4 * 16 * 61 * 61 total elements.
    xnumel = 238144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    # Channel index: 3721 = 61 * 61 spatial positions per channel.
    x1 = xindex // 3721 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    # relu(x) = max(0, x)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU after conv2, written out-of-place into a buffer
    # whose channel stride is padded to 3392 (vs. dense 3364 = 58 * 58) for
    # alignment. in_ptr1 is the (4,) channel bias. 53824 = 4 * 4 * 58 * 58.
    xnumel = 53824
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3364 % 4
    x0 = xindex % 3364
    x4 = xindex // 3364
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    # Store with the padded per-channel stride of 3392.
    tl.store(out_ptr0 + (x0 + 3392 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max-pool over the 58x58 maps (padded stride 3392),
    # producing 29x29 values (out_ptr0) plus an int8 argmax code 0..3 per
    # window (out_ptr1), saved for the backward pass.
    # 13456 = 4 * 4 * 29 * 29; 116 = 2 rows * 58 columns.
    xnumel = 13456
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 29
    x1 = xindex // 29 % 29
    x4 = xindex // 841
    x3 = xindex // 3364
    x5 = xindex % 3364
    # The four corners of the 2x2 window: (0,0), (0,1), (1,0), (1,1).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 116 * x1 + 3392 * x4), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 116 * x1 + 3392 * x4), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (58 + 2 * x0 + 116 * x1 + 3392 * x4), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (59 + 2 * x0 + 116 * x1 + 3392 * x4), xmask,
        eviction_policy='evict_last')
    # Running max over the window.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which of the four positions won (strict > keeps the earliest on ties).
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x5 + 3392 * x3), tmp6, xmask)
    tl.store(out_ptr1 + (x5 + 3456 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU after conv1_ (stride 2).
    # 10816 = 4 * 16 * 13 * 13; 169 = 13 * 13 spatial positions per channel.
    xnumel = 10816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 169 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_tanh_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + tanh after conv2_ -> final network output.
    # 300 = 4 * 3 * 5 * 5; 25 = 5 * 5 spatial positions per channel.
    xnumel = 300
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
    # Inductor-compiled forward pass for Actor:
    # conv1 -> ReLU -> conv2 -> ReLU -> 2x2 max-pool -> conv1_ -> ReLU ->
    # conv2_ -> tanh. Convolutions go through extern_kernels (cuDNN); the
    # bias/activation epilogues run in the fused Triton kernels above.
    # args: (input, conv1.w, conv1.b, conv2.w, conv2.b, conv1_.w, conv1_.b,
    # conv2_.w, conv2_.b). Returns (output, tensors saved for backward).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_2, (16, 3, 4, 4), (48, 16, 4, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (4, 16, 4, 4), (256, 16, 4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (16, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (16,), (1,))
    assert_size_stride(primals_8, (3, 16, 4, 4), (256, 16, 4, 1))
    assert_size_stride(primals_9, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 (bias deferred to the fused Triton epilogue)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 61, 61), (59536, 3721, 61, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(238144)](buf1, primals_3,
            238144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_3
        # conv2, then bias+ReLU written into an alignment-padded buffer
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 58, 58), (13456, 3364, 58, 1))
        buf3 = empty_strided_cuda((4, 4, 58, 58), (13568, 3392, 58, 1),
            torch.float32)
        triton_poi_fused_convolution_relu_1[grid(53824)](buf2, primals_5,
            buf3, 53824, XBLOCK=256, num_warps=4, num_stages=1)
        del buf2
        del primals_5
        # 2x2 max-pool: values in buf4, int8 argmax codes in buf5 (backward)
        buf4 = empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1), torch
            .float32)
        buf5 = empty_strided_cuda((4, 4, 29, 29), (3456, 841, 29, 1), torch
            .int8)
        triton_poi_fused_max_pool2d_with_indices_2[grid(13456)](buf3, buf4,
            buf5, 13456, XBLOCK=128, num_warps=4, num_stages=1)
        # conv1_ (stride 2) + in-place bias/ReLU
        buf6 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 16, 13, 13), (2704, 169, 13, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_3[grid(10816)](buf7, primals_7,
            10816, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        # conv2_ (stride 2) + in-place bias/tanh -> network output
        buf8 = extern_kernels.convolution(buf7, primals_8, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 3, 5, 5), (75, 25, 5, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_tanh_4[grid(300)](buf9, primals_9, 300,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
    return (buf9, primals_1, primals_2, primals_4, primals_6, primals_8,
        buf1, buf3, buf4, buf5, buf7, buf9)
class ActorNew(nn.Module):
    """Actor re-implemented on top of the inductor-generated `call`.

    Parameters live in the same nn.Conv2d submodules as in Actor, so the
    state_dict layout is unchanged; only forward() is replaced.
    """

    def __init__(self, kernel_size):
        super(ActorNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=kernel_size)
        self.conv2 = nn.Conv2d(16, 4, kernel_size=kernel_size)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv1_ = nn.Conv2d(4, 16, kernel_size=kernel_size, stride=2)
        self.conv2_ = nn.Conv2d(16, 3, kernel_size=kernel_size, stride=2)

    def forward(self, input_0):
        # Pack weights/biases in the exact order `call` expects.
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv1_.weight
        primals_7 = self.conv1_.bias
        primals_8 = self.conv2_.weight
        primals_9 = self.conv2_.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        # output[0] is the tanh-bounded result; the rest are saved tensors.
        return output[0]
|
zwc662/SequentialAttack
|
Actor
| false
| 16,844
|
[
"MIT"
] | 116
|
677b19c51ea76d794939ee126fccd75ffa0e6fe6
|
https://github.com/zwc662/SequentialAttack/tree/677b19c51ea76d794939ee126fccd75ffa0e6fe6
|
StdConv2dSame
|
import math
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
import torch.nn.functional as F
import torch.utils.data.distributed
def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int'):
    """Total padding along one dim for TF-style SAME output.

    x: input size, k: kernel size, s: stride, d: dilation.
    """
    last_start = (math.ceil(x / s) - 1) * s
    span = last_start + (k - 1) * d + 1
    return max(span - x, 0)
def pad_same(x, k, s, d=(1, 1), value=0):
    """Pad the last two dims of x so a conv with kernel k, stride s and
    dilation d produces TF-style SAME output; extra padding goes after."""
    ih, iw = x.size()[-2:]
    pad_h = get_same_padding(ih, k[0], s[0], d[0])
    pad_w = get_same_padding(iw, k[1], s[1], d[1])
    if pad_h == 0 and pad_w == 0:
        return x
    top, left = pad_h // 2, pad_w // 2
    return F.pad(x, [left, pad_w - left, top, pad_h - top], value=value)
class StdConv2dSame(nn.Conv2d):
    """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model.
    Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
        https://arxiv.org/abs/1903.10520v2
    """

    def __init__(self, in_channel, out_channels, kernel_size, stride=1,
        dilation=1, groups=1, bias=False, eps=1e-05):
        # padding is fixed at 0 here; SAME padding is applied explicitly
        # in forward() via pad_same.
        super().__init__(in_channel, out_channels, kernel_size,
            stride=stride, padding=0, dilation=dilation, groups=groups,
            bias=bias)
        self.eps = eps

    def get_weight(self):
        """Return the weight standardized per output filter (biased std)."""
        std, mean = torch.std_mean(self.weight, dim=[1, 2, 3],
            keepdim=True, unbiased=False)
        return (self.weight - mean) / (std + self.eps)

    def forward(self, x):
        # Compute the standardized weight once and reuse it for both the
        # padding-size query and the convolution.
        weight = self.get_weight()
        padded = pad_same(x, weight.shape[-2:], self.stride, self.dilation)
        return F.conv2d(padded, weight, self.bias, self.stride, (0, 0),
            self.dilation, self.groups)
def get_inputs():
    """Return a one-element list with a random StdConv2dSame input."""
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    """Return [positional args, keyword args] for StdConv2dSame."""
    kwargs = {'in_channel': 4, 'out_channels': 4, 'kernel_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torchvision.transforms.functional as F
import torch.nn.functional as F
import torch.utils.data.distributed
# Aliases for inductor's runtime shape/stride guard, raw CUDA allocator,
# and zero-copy tensor re-view helper.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Zero-pad the (4, 4, 4, 4) input to (4, 4, 7, 7): SAME padding for a
    # 4x4 kernel at stride 1 (pad total 3 -> 1 before, 2 after per dim).
    # 784 = 4 * 4 * 7 * 7 output elements.
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 7 % 7
    x0 = xindex % 7
    x2 = xindex // 49
    x4 = xindex
    # Shift padded coords back to source coords; only load where the
    # source index is inside [0, 4) in both dims, else write 0.0.
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = -1 + x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
        other=0.0)
    tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_per_fused_add_div_std_mean_sub_1(in_out_ptr0, in_ptr0, out_ptr1,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    # Weight standardization: one program per output filter (4 filters,
    # 64 = 4*4*4 weights each). Writes the biased std per filter to
    # in_out_ptr0 and (w - mean) / (std + 1e-05) to out_ptr1.
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    # Mean over the 64 weights of this filter.
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    # Biased variance (divide by N=64, matching unbiased=False), then sqrt.
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 64.0
    tmp18 = tmp16 / tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tmp20 = tmp0 - tmp10
    tmp21 = 1e-05
    tmp22 = tmp19 + tmp21
    tmp23 = tmp20 / tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp19, xmask)
    tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask)
def call(args):
    # Inductor-compiled StdConv2dSame.forward for a 4x4 input:
    # zero-pad input (SAME) -> standardize weight per filter -> conv2d.
    # args: (weight, input). Returns (output, tensors saved for backward).
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Padded input (4, 4, 7, 7)
        buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(784)](primals_2, buf0, 784,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # buf4: per-filter std; buf5: standardized weight
        buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf4 = reinterpret_tensor(buf2, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf2
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused_add_div_std_mean_sub_1[grid(4)](buf4, primals_1,
            buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf6 = extern_kernels.convolution(buf0, buf5, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
    return buf6, primals_1, buf0, buf4, buf5
def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int'):
    """Total padding along one dim for TF-style SAME output.

    x: input size, k: kernel size, s: stride, d: dilation.
    """
    last_start = (math.ceil(x / s) - 1) * s
    span = last_start + (k - 1) * d + 1
    return max(span - x, 0)
def pad_same(x, k, s, d=(1, 1), value=0):
    """Pad the last two dims of x so a conv with kernel k, stride s and
    dilation d produces TF-style SAME output; extra padding goes after."""
    ih, iw = x.size()[-2:]
    pad_h = get_same_padding(ih, k[0], s[0], d[0])
    pad_w = get_same_padding(iw, k[1], s[1], d[1])
    if pad_h == 0 and pad_w == 0:
        return x
    top, left = pad_h // 2, pad_w // 2
    return F.pad(x, [left, pad_w - left, top, pad_h - top], value=value)
class StdConv2dSameNew(nn.Conv2d):
    """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model.
    Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
        https://arxiv.org/abs/1903.10520v2

    Inductor-backed variant: forward() delegates to the generated `call`,
    which fuses padding + weight standardization; state_dict layout matches
    StdConv2dSame.
    """

    def __init__(self, in_channel, out_channels, kernel_size, stride=1,
        dilation=1, groups=1, bias=False, eps=1e-05):
        super().__init__(in_channel, out_channels, kernel_size, stride=
            stride, padding=0, dilation=dilation, groups=groups, bias=bias)
        self.eps = eps

    def get_weight(self):
        # Standardize per output filter: (w - mean) / (biased std + eps).
        std, mean = torch.std_mean(self.weight, dim=[1, 2, 3], keepdim=True,
            unbiased=False)
        weight = (self.weight - mean) / (std + self.eps)
        return weight

    def forward(self, input_0):
        # `call` expects (weight, input); output[0] is the conv result.
        primals_1 = self.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
|
ziniuwan/maed
|
StdConv2dSame
| false
| 16,845
|
[
"MIT"
] | 145
|
9e1f1c37eba81da86c8d9c62dc9be41a01abff5b
|
https://github.com/ziniuwan/maed/tree/9e1f1c37eba81da86c8d9c62dc9be41a01abff5b
|
MDNHead
|
from torch.nn import Module
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from torch.distributions import Categorical
from torch.nn.utils import vector_to_parameters
from torch.nn.utils import parameters_to_vector
def ortho_init(module, nonlinearity=None, weight_scale=1.0, constant_bias=0.0):
    """Orthogonally initialize the parameters of *module* in place.

    Args:
        module (nn.Module): an elementary layer (e.g. nn.Linear, nn.Conv2d,
            nn.LSTM); submodules are not traversed.
        nonlinearity (str, optional): when given, the orthogonal gain is
            taken from ``nn.init.calculate_gain`` and ``weight_scale`` is
            ignored.
        weight_scale (float, optional): gain used when ``nonlinearity`` is
            ``None``. Default: 1.0.
        constant_bias (float, optional): fill value for biases. Default: 0.0.
    """
    if nonlinearity is None:
        gain = weight_scale
    else:
        gain = nn.init.calculate_gain(nonlinearity)
    if not isinstance(module, (nn.RNNBase, nn.RNNCellBase)):
        nn.init.orthogonal_(module.weight, gain=gain)
        nn.init.constant_(module.bias, constant_bias)
        return
    # Recurrent layers expose several weight_*/bias_* tensors; initialize
    # each by name.
    for name, param in module.named_parameters():
        if 'weight_' in name:
            nn.init.orthogonal_(param, gain=gain)
        elif 'bias_' in name:
            nn.init.constant_(param, constant_bias)
class MDNHead(Module):
    """Mixture Density Network head: maps features to the parameters of a
    K-component diagonal Gaussian mixture (mixing logits, means, stds)."""

    def __init__(self, in_features, out_features, num_density, **kwargs):
        super().__init__(**kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.num_density = num_density
        # Three parallel linear heads, each producing K * D values; all are
        # orthogonally initialized with a small gain (0.01).
        self.pi_head = nn.Linear(in_features, out_features * num_density)
        ortho_init(self.pi_head, weight_scale=0.01, constant_bias=0.0)
        self.mean_head = nn.Linear(in_features, out_features * num_density)
        ortho_init(self.mean_head, weight_scale=0.01, constant_bias=0.0)
        self.logvar_head = nn.Linear(in_features, out_features * num_density)
        ortho_init(self.logvar_head, weight_scale=0.01, constant_bias=0.0)

    def forward(self, x):
        """Return (logit_pi, mean, std), each shaped [N, K, D].

        std is derived from the logvar head via exp(0.5 * logvar).
        """
        logit_pi = self.pi_head(x).view(-1, self.num_density, self.out_features
            )
        mean = self.mean_head(x).view(-1, self.num_density, self.out_features)
        logvar = self.logvar_head(x).view(-1, self.num_density, self.
            out_features)
        std = torch.exp(0.5 * logvar)
        return logit_pi, mean, std

    def loss(self, logit_pi, mean, std, target):
        """Calculate the MDN loss function.
        The loss function (negative log-likelihood) is defined by:
        .. math::
            L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\prod_{d=1}^{D} \\pi_{k}(x_{n, d})
            \\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d}) \\right) \\right)
        For better numerical stability, we could use log-scale:
        .. math::
            L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\exp \\left\\{ \\sum_{d=1}^{D}
            \\ln\\pi_{k}(x_{n, d}) + \\ln\\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d})
            \\right) \\right\\} \\right)
        .. note::
            One should always use the second formula via log-sum-exp trick. The first formula
            is numerically unstable resulting in +/- ``Inf`` and ``NaN`` error.
        The log-sum-exp trick is defined by
        .. math::
            \\log\\sum_{i=1}^{N}\\exp(x_i) = a + \\log\\sum_{i=1}^{N}\\exp(x_i - a)
        where :math:`a = \\max_i(x_i)`
        Args:
            logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
            mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
            std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
            target (Tensor): target tensor, shape [N, D]
        Returns:
            Tensor: calculated loss
        """
        # Broadcast target against the K mixture components.
        target = target.unsqueeze(1)
        log_pi = F.log_softmax(logit_pi, dim=1)
        dist = Normal(mean, std)
        log_probs = dist.log_prob(target)
        # Sum over D (independent dims), logsumexp over K components.
        joint_log_probs = torch.sum(log_pi + log_probs, dim=-1, keepdim=False)
        loss = torch.logsumexp(joint_log_probs, dim=-1, keepdim=False)
        loss = -loss.mean(0)
        return loss

    def sample(self, logit_pi, mean, std, tau=1.0):
        """Sample from Gaussian mixtures using reparameterization trick.
        - Firstly sample categorically over mixing coefficients to determine a specific Gaussian
        - Then sample from selected Gaussian distribution
        Args:
            logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
            mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
            std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
            tau (float): temperature during sampling, it controls uncertainty.
                * If :math:`\\tau > 1`: increase uncertainty
                * If :math:`\\tau < 1`: decrease uncertainty
        Returns:
            Tensor: sampled data with shape [N, D]
        """
        N, K, D = logit_pi.shape
        # Temperature-scaled mixing weights; flatten (N, D) pairs so one
        # categorical draw picks a component per output dimension.
        pi = F.softmax(logit_pi / tau, dim=1)
        pi = pi.permute(0, 2, 1).view(-1, K)
        mean = mean.permute(0, 2, 1).view(-1, K)
        std = std.permute(0, 2, 1).view(-1, K)
        pi_samples = Categorical(pi).sample()
        mean = mean[torch.arange(N * D), pi_samples]
        std = std[torch.arange(N * D), pi_samples]
        # Reparameterized Gaussian draw, std additionally scaled by sqrt(tau).
        eps = torch.randn_like(std)
        samples = mean + eps * std * np.sqrt(tau)
        samples = samples.view(N, D)
        return samples
class Module(nn.Module):
    """Wrap PyTorch nn.Module to provide more helper functions.

    Keyword arguments passed to the constructor are attached as attributes.
    """

    def __init__(self, **kwargs):
        super().__init__()
        for key, val in kwargs.items():
            self.__setattr__(key, val)

    @property
    def num_params(self):
        """Returns the total number of parameters in the neural network. """
        return sum(param.numel() for param in self.parameters())

    @property
    def num_trainable_params(self):
        """Returns the total number of trainable parameters in the neural network."""
        return sum(param.numel() for param in self.parameters() if param.
            requires_grad)

    @property
    def num_untrainable_params(self):
        """Returns the total number of untrainable parameters in the neural network. """
        return sum(param.numel() for param in self.parameters() if not
            param.requires_grad)

    def to_vec(self):
        """Returns the network parameters as a single flattened vector. """
        return parameters_to_vector(parameters=self.parameters())

    def from_vec(self, x):
        """Set the network parameters from a single flattened vector.
        Args:
            x (Tensor): A single flattened vector of the network parameters with consistent size.
        """
        vector_to_parameters(vec=x, parameters=self.parameters())

    def save(self, f):
        """Save the network parameters to a file.
        It complies with the `recommended approach for saving a model in PyTorch documentation`_.
        .. note::
            It uses the highest pickle protocol to serialize the network parameters.
        Args:
            f (str): file path.
        .. _recommended approach for saving a model in PyTorch documentation:
            https://pytorch.org/docs/master/notes/serialization.html#best-practices
        """
        # Bug fix: `pickle` was referenced but never imported anywhere in
        # this module, so save() always raised NameError. Import locally.
        import pickle
        torch.save(obj=self.state_dict(), f=f, pickle_protocol=pickle.
            HIGHEST_PROTOCOL)

    def load(self, f):
        """Load the network parameters from a file.
        It complies with the `recommended approach for saving a model in PyTorch documentation`_.
        Args:
            f (str): file path.
        .. _recommended approach for saving a model in PyTorch documentation:
            https://pytorch.org/docs/master/notes/serialization.html#best-practices
        """
        self.load_state_dict(torch.load(f))
def get_inputs():
    """Return a one-element list with a random MDNHead input."""
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    """Return [positional args, keyword args] for MDNHead."""
    kwargs = {'in_features': 4, 'out_features': 4, 'num_density': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from torch.distributions import Categorical
from torch.nn.utils import vector_to_parameters
from torch.nn.utils import parameters_to_vector
# Aliases for inductor's runtime shape/stride guard, raw CUDA allocator,
# and zero-copy tensor re-view helper.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # In-place epilogue for the logvar head: std = exp(0.5 * (mm + bias)).
    # in_out_ptr0 holds the (64, 16) mm result; in_ptr0 is the (16,) bias.
    # 1024 = 64 * 16 elements.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = tl_math.exp(tmp4)
    tl.store(in_out_ptr0 + x2, tmp5, xmask)
def call(args):
    # Inductor-compiled MDNHead.forward: addmm (bias fused via beta=1) for
    # the pi and mean heads; the logvar head is a plain mm whose bias-add
    # and exp(0.5 * logvar) run in the fused Triton kernel above.
    # args: (pi.w, pi.b, input, mean.w, mean.b, logvar.w, logvar.b).
    # Returns (logit_pi, mean, std, flattened input, std) — the trailing
    # tensors are saved for the backward pass.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (16, 4), (4, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (16, 4), (4, 1))
    assert_size_stride(primals_7, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # pi head: bias + x @ W^T in one addmm
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        # mean head
        buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        # logvar head: bias/exp handled by the Triton kernel below
        buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
        del primals_6
        buf3 = reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0)
        del buf2
        get_raw_stream(0)
        triton_poi_fused_exp_mul_0[grid(1024)](buf3, primals_7, 1024,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
    return reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0
        ), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3
def ortho_init(module, nonlinearity=None, weight_scale=1.0, constant_bias=0.0):
    """Orthogonally initialize the parameters of *module* in place.

    Args:
        module (nn.Module): an elementary layer (e.g. nn.Linear, nn.Conv2d,
            nn.LSTM); submodules are not traversed.
        nonlinearity (str, optional): when given, the orthogonal gain is
            taken from ``nn.init.calculate_gain`` and ``weight_scale`` is
            ignored.
        weight_scale (float, optional): gain used when ``nonlinearity`` is
            ``None``. Default: 1.0.
        constant_bias (float, optional): fill value for biases. Default: 0.0.
    """
    if nonlinearity is None:
        gain = weight_scale
    else:
        gain = nn.init.calculate_gain(nonlinearity)
    if not isinstance(module, (nn.RNNBase, nn.RNNCellBase)):
        nn.init.orthogonal_(module.weight, gain=gain)
        nn.init.constant_(module.bias, constant_bias)
        return
    # Recurrent layers expose several weight_*/bias_* tensors; initialize
    # each by name.
    for name, param in module.named_parameters():
        if 'weight_' in name:
            nn.init.orthogonal_(param, gain=gain)
        elif 'bias_' in name:
            nn.init.constant_(param, constant_bias)
class MDNHeadNew(Module):
    """MDNHead with forward() delegated to the inductor-generated `call`.

    Parameters live in the same Linear submodules as MDNHead, so the
    state_dict layout is unchanged; loss/sample are identical to MDNHead.
    """

    def __init__(self, in_features, out_features, num_density, **kwargs):
        super().__init__(**kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.num_density = num_density
        self.pi_head = nn.Linear(in_features, out_features * num_density)
        ortho_init(self.pi_head, weight_scale=0.01, constant_bias=0.0)
        self.mean_head = nn.Linear(in_features, out_features * num_density)
        ortho_init(self.mean_head, weight_scale=0.01, constant_bias=0.0)
        self.logvar_head = nn.Linear(in_features, out_features * num_density)
        ortho_init(self.logvar_head, weight_scale=0.01, constant_bias=0.0)

    def loss(self, logit_pi, mean, std, target):
        """Calculate the MDN loss function.
        The loss function (negative log-likelihood) is defined by:
        .. math::
            L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\prod_{d=1}^{D} \\pi_{k}(x_{n, d})
            \\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d}) \\right) \\right)
        For better numerical stability, we could use log-scale:
        .. math::
            L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\exp \\left\\{ \\sum_{d=1}^{D}
            \\ln\\pi_{k}(x_{n, d}) + \\ln\\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d})
            \\right) \\right\\} \\right)
        .. note::
            One should always use the second formula via log-sum-exp trick. The first formula
            is numerically unstable resulting in +/- ``Inf`` and ``NaN`` error.
        The log-sum-exp trick is defined by
        .. math::
            \\log\\sum_{i=1}^{N}\\exp(x_i) = a + \\log\\sum_{i=1}^{N}\\exp(x_i - a)
        where :math:`a = \\max_i(x_i)`
        Args:
            logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
            mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
            std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
            target (Tensor): target tensor, shape [N, D]
        Returns:
            Tensor: calculated loss
        """
        # Broadcast target against the K mixture components.
        target = target.unsqueeze(1)
        log_pi = F.log_softmax(logit_pi, dim=1)
        dist = Normal(mean, std)
        log_probs = dist.log_prob(target)
        # Sum over D (independent dims), logsumexp over K components.
        joint_log_probs = torch.sum(log_pi + log_probs, dim=-1, keepdim=False)
        loss = torch.logsumexp(joint_log_probs, dim=-1, keepdim=False)
        loss = -loss.mean(0)
        return loss

    def sample(self, logit_pi, mean, std, tau=1.0):
        """Sample from Gaussian mixtures using reparameterization trick.
        - Firstly sample categorically over mixing coefficients to determine a specific Gaussian
        - Then sample from selected Gaussian distribution
        Args:
            logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
            mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
            std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
            tau (float): temperature during sampling, it controls uncertainty.
                * If :math:`\\tau > 1`: increase uncertainty
                * If :math:`\\tau < 1`: decrease uncertainty
        Returns:
            Tensor: sampled data with shape [N, D]
        """
        N, K, D = logit_pi.shape
        # Temperature-scaled mixing weights; flatten (N, D) pairs so one
        # categorical draw picks a component per output dimension.
        pi = F.softmax(logit_pi / tau, dim=1)
        pi = pi.permute(0, 2, 1).view(-1, K)
        mean = mean.permute(0, 2, 1).view(-1, K)
        std = std.permute(0, 2, 1).view(-1, K)
        pi_samples = Categorical(pi).sample()
        mean = mean[torch.arange(N * D), pi_samples]
        std = std[torch.arange(N * D), pi_samples]
        # Reparameterized Gaussian draw, std additionally scaled by sqrt(tau).
        eps = torch.randn_like(std)
        samples = mean + eps * std * np.sqrt(tau)
        samples = samples.view(N, D)
        return samples

    def forward(self, input_0):
        # Pack weights/biases in the order `call` expects; it returns
        # (logit_pi, mean, std, <saved tensors>).
        primals_1 = self.pi_head.weight
        primals_2 = self.pi_head.bias
        primals_4 = self.mean_head.weight
        primals_5 = self.mean_head.bias
        primals_6 = self.logvar_head.weight
        primals_7 = self.logvar_head.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1], output[2]
class Module(nn.Module):
    """Wrap PyTorch nn.Module to provide more helper functions.

    Keyword arguments passed to the constructor are attached as attributes.
    """

    def __init__(self, **kwargs):
        super().__init__()
        for key, val in kwargs.items():
            self.__setattr__(key, val)

    @property
    def num_params(self):
        """Returns the total number of parameters in the neural network. """
        return sum(param.numel() for param in self.parameters())

    @property
    def num_trainable_params(self):
        """Returns the total number of trainable parameters in the neural network."""
        return sum(param.numel() for param in self.parameters() if param.
            requires_grad)

    @property
    def num_untrainable_params(self):
        """Returns the total number of untrainable parameters in the neural network. """
        return sum(param.numel() for param in self.parameters() if not
            param.requires_grad)

    def to_vec(self):
        """Returns the network parameters as a single flattened vector. """
        return parameters_to_vector(parameters=self.parameters())

    def from_vec(self, x):
        """Set the network parameters from a single flattened vector.
        Args:
            x (Tensor): A single flattened vector of the network parameters with consistent size.
        """
        vector_to_parameters(vec=x, parameters=self.parameters())

    def save(self, f):
        """Save the network parameters to a file.
        It complies with the `recommended approach for saving a model in PyTorch documentation`_.
        .. note::
            It uses the highest pickle protocol to serialize the network parameters.
        Args:
            f (str): file path.
        .. _recommended approach for saving a model in PyTorch documentation:
            https://pytorch.org/docs/master/notes/serialization.html#best-practices
        """
        # Bug fix: `pickle` was referenced but never imported anywhere in
        # this module, so save() always raised NameError. Import locally.
        import pickle
        torch.save(obj=self.state_dict(), f=f, pickle_protocol=pickle.
            HIGHEST_PROTOCOL)

    def load(self, f):
        """Load the network parameters from a file.
        It complies with the `recommended approach for saving a model in PyTorch documentation`_.
        Args:
            f (str): file path.
        .. _recommended approach for saving a model in PyTorch documentation:
            https://pytorch.org/docs/master/notes/serialization.html#best-practices
        """
        self.load_state_dict(torch.load(f))
|
zuoxingdong/lagom
|
MDNHead
| false
| 16,846
|
[
"MIT"
] | 383
|
3b6710804dbc79c6dffb369ac87c68f4055ab6cd
|
https://github.com/zuoxingdong/lagom/tree/3b6710804dbc79c6dffb369ac87c68f4055ab6cd
|
_ASPPModule
|
import torch
import torch.nn as nn
class GCT(nn.Module):
    """Gated Channel Transformation.

    Computes a per-channel embedding of the input (l2 or l1 norm over the
    spatial dims, scaled by alpha), normalizes it across channels with
    gamma, and gates the input by ``1 + tanh(embedding * norm + beta)``.
    With the default gamma=0, beta=0 parameters the gate is identity.
    """

    def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
        ):
        super(GCT, self).__init__()
        self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
        self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.epsilon = epsilon
        self.mode = mode
        self.after_relu = after_relu

    def forward(self, x):
        if self.mode == 'l2':
            embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon
                ).pow(0.5) * self.alpha
            norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) +
                self.epsilon).pow(0.5)
        elif self.mode == 'l1':
            # after_relu means the input is already non-negative, so abs()
            # can be skipped.
            _x = x if self.after_relu else torch.abs(x)
            embedding = _x.sum((2, 3), keepdim=True) * self.alpha
            norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=
                True) + self.epsilon)
        else:
            # Bug fix: the original branch was a bare `None` statement
            # followed by exit(), which silently killed the whole process
            # from library code. Raise a descriptive error instead.
            raise ValueError(
                f"Unknown GCT mode: {self.mode!r} (expected 'l2' or 'l1')")
        gate = 1.0 + torch.tanh(embedding * norm + self.beta)
        return x * gate
class _ASPPModule(nn.Module):
    """One ASPP branch: GCT gating -> atrous conv -> GroupNorm -> ReLU."""

    def __init__(self, inplanes, planes, kernel_size, padding, dilation):
        super(_ASPPModule, self).__init__()
        self.GCT = GCT(inplanes)
        self.atrous_conv = nn.Conv2d(inplanes, planes,
            kernel_size=kernel_size, stride=1, padding=padding,
            dilation=dilation, bias=False)
        # planes/4 channels per normalization group.
        self.bn = nn.GroupNorm(int(planes / 4), planes)
        self.relu = nn.ReLU(inplace=True)
        self._init_weight()

    def forward(self, x):
        out = self.GCT(x)
        out = self.atrous_conv(out)
        out = self.bn(out)
        return self.relu(out)

    def _init_weight(self):
        """Kaiming-init conv weights; unit-init any BatchNorm layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def get_inputs():
    # One random NCHW tensor: batch 4, 4 channels, 4x4 spatial.
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    # Positional args and keyword args for _ASPPModule.__init__.
    kwargs = {'inplanes': 4, 'planes': 4, 'kernel_size': 4, 'padding': 4,
        'dilation': 1}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # GCT 'l2' embedding: for each of the 16 (batch, channel) rows reduce
    # the 16 spatial elements to sqrt(sum(x^2) + 1e-5). Alpha is applied
    # in a later kernel.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    # Square each spatial element and reduce along the r axis.
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    # Numerical-stability epsilon before the square root.
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.sqrt(tmp7)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Per-sample channel RMS of the scaled GCT embedding:
    # out[b] = sqrt(mean_c((emb[b, c] * alpha[c])^2) + 1e-5).
    # The four channels are fully unrolled below.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Channel 0..3 embeddings and the matching alpha entries.
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + 1)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + 2)
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
    tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp18 = tl.load(in_ptr1 + 3)
    tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
    # Accumulate (emb * alpha)^2 across the four channels.
    tmp3 = tmp0 * tmp2
    tmp4 = tmp3 * tmp3
    tmp8 = tmp5 * tmp7
    tmp9 = tmp8 * tmp8
    tmp10 = tmp4 + tmp9
    tmp14 = tmp11 * tmp13
    tmp15 = tmp14 * tmp14
    tmp16 = tmp10 + tmp15
    tmp20 = tmp17 * tmp19
    tmp21 = tmp20 * tmp20
    tmp22 = tmp16 + tmp21
    # Mean over 4 channels, add eps, square root.
    tmp23 = 4.0
    tmp24 = tmp22 / tmp23
    tmp25 = 1e-05
    tmp26 = tmp24 + tmp25
    tmp27 = libdevice.sqrt(tmp26)
    tl.store(out_ptr0 + x0, tmp27, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # GCT gate: out = 1 + tanh(emb*alpha * (gamma / rms) + beta) for each
    # of the 16 (batch, channel) entries.
    # in_ptr0=embedding, in_ptr1=alpha, in_ptr2=gamma, in_ptr3=per-sample
    # RMS denominator, in_ptr4=beta.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 / tmp4
    tmp6 = tmp2 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.tanh(tmp8)
    tmp10 = 1.0
    tmp11 = tmp9 + tmp10
    tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Apply the per-(batch, channel) gate: out = x * gate, with the gate
    # broadcast over the 16 spatial positions of each channel.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_4(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
    # Fused GroupNorm + affine + ReLU, one program per sample. With a single
    # group covering 4 channels of 9x9 maps, each group holds 4*81 = 324
    # elements. Also emits the `output <= 0` mask used by the ReLU backward,
    # the per-group mean (out_ptr0) and rsqrt(var + eps) (out_ptr4).
    XBLOCK: tl.constexpr = 1
    rnumel = 324
    RBLOCK: tl.constexpr = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    r3 = rindex // 81
    tmp0 = tl.load(in_ptr0 + (r1 + 324 * x0), rmask, other=0.0)
    # Per-channel affine weight/bias (r3 selects the channel).
    tmp24 = tl.load(in_ptr1 + r3, rmask, eviction_policy='evict_last',
        other=0.0)
    tmp26 = tl.load(in_ptr2 + r3, rmask, eviction_policy='evict_last',
        other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tl.where(rmask, tmp1, 0)
    # Group mean.
    tmp4 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp6 = tl.where(rmask, tmp4, 0)
    tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
    tmp8 = tl.full([1], 324, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    # Group variance and rsqrt(var + eps).
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = tl.where(rmask, tmp13, 0)
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
    tmp17 = tmp0 - tmp10
    tmp18 = 324.0
    tmp19 = tmp16 / tmp18
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)
    # Normalize, apply affine, ReLU, and record the <= 0 mask.
    tmp23 = tmp17 * tmp22
    tmp25 = tmp23 * tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = tl.full([1], 0, tl.int32)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tmp30 = 0.0
    tmp31 = tmp29 <= tmp30
    tl.store(out_ptr2 + (r1 + 324 * x0), tmp29, rmask)
    tl.store(out_ptr3 + (r1 + 324 * x0), tmp31, rmask)
    tl.store(out_ptr4 + x0, tmp22, None)
    tl.store(out_ptr0 + x0, tmp10, None)
def call(args):
    # Inductor-generated driver for _ASPPModuleNew: GCT gating (buf1..buf4),
    # the atrous convolution via the extern kernel, then fused GroupNorm+ReLU.
    # NOTE(review): the Triton kernels feed primals_1 through the GCT stages
    # and pass primals_5 as the convolution weight, while _ASPPModuleNew
    # binds primals_1 to the conv weight and primals_5 to the input; both are
    # (4, 4, 4, 4) here so the asserts pass -- verify against the generator.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf1: per-(batch, channel) L2 embedding of primals_1.
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_pow_sum_0[grid(16)](buf1, primals_1, 16, 16,
            XBLOCK=8, num_warps=2, num_stages=1)
        # buf2: per-sample RMS denominator of the scaled embedding.
        buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        triton_poi_fused_add_mean_mul_pow_1[grid(4)](buf1, primals_2, buf2,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        # buf3: gate = 1 + tanh(...); buf4: gated activation.
        buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        triton_poi_fused_add_div_mean_mul_pow_tanh_2[grid(16)](buf1,
            primals_2, primals_3, buf2, primals_4, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_pow_tanh_3[grid(256)](primals_1,
            buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf3
        # Dilated convolution via cuDNN/extern kernel (padding 4, no bias).
        buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1),
            padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 4, 9, 9), (324, 81, 9, 1))
        buf6 = buf2
        del buf2
        buf10 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32
            )
        buf11 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.bool)
        buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        # Fused GroupNorm + ReLU; buf11 is the ReLU mask for backward.
        triton_per_fused_native_group_norm_relu_threshold_backward_4[grid(4)](
            buf5, primals_6, primals_7, buf6, buf10, buf11, buf9, 4, 324,
            num_warps=4, num_stages=1)
        del primals_7
    # Return the activated output plus saved tensors for autograd.
    return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5,
        primals_6, buf1, buf4, buf5, reinterpret_tensor(buf6, (4, 1), (1, 1
        ), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11)
class GCT(nn.Module):
    """Gated Channel Transformation: per-channel attention gating.

    Each channel is summarized by a global embedding (L2 or L1 norm over the
    spatial dims), normalized across channels, and turned into a gate in
    (0, 2) via ``1 + tanh``. Parameters ``alpha`` (embedding scale),
    ``gamma`` (normalization scale) and ``beta`` (gate bias) are shaped
    (1, C, 1, 1) so they broadcast over batch and spatial dims. With the
    initial ``gamma == 0`` the gate is exactly 1 and the module is identity.

    Args:
        num_channels: number of input channels C.
        epsilon: numerical-stability constant added before roots/divisions.
        mode: 'l2' or 'l1' channel-embedding norm.
        after_relu: in 'l1' mode, skip the abs() when the input is already
            non-negative (i.e. the module sits after a ReLU).

    Raises:
        ValueError: if ``mode`` is neither 'l2' nor 'l1'.
    """

    def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
        ):
        super(GCT, self).__init__()
        self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
        self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.epsilon = epsilon
        self.mode = mode
        self.after_relu = after_relu

    def forward(self, x):
        if self.mode == 'l2':
            # Per-channel L2 norm over spatial dims, scaled by alpha.
            embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon
                ).pow(0.5) * self.alpha
            # Normalize the embedding by its RMS across channels.
            norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) +
                self.epsilon).pow(0.5)
        elif self.mode == 'l1':
            if not self.after_relu:
                _x = torch.abs(x)
            else:
                _x = x
            embedding = _x.sum((2, 3), keepdim=True) * self.alpha
            norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=
                True) + self.epsilon)
        else:
            # BUGFIX: the original code executed a bare `None` and then
            # exit(), silently killing the whole process; raise instead.
            raise ValueError(
                "Unknown GCT mode: %r (expected 'l2' or 'l1')" % (self.mode,))
        gate = 1.0 + torch.tanh(embedding * norm + self.beta)
        return x * gate
class _ASPPModuleNew(nn.Module):
    """Triton-backed ASPP branch: GCT -> dilated conv -> GroupNorm -> ReLU.

    Same parameters as _ASPPModule; forward dispatches to the generated
    call() instead of running the submodules eagerly.
    """

    def __init__(self, inplanes, planes, kernel_size, padding, dilation):
        super(_ASPPModuleNew, self).__init__()
        self.GCT = GCT(inplanes)
        self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=
            kernel_size, stride=1, padding=padding, dilation=dilation, bias
            =False)
        self.bn = nn.GroupNorm(int(planes / 4), planes)
        self.relu = nn.ReLU(inplace=True)
        self._init_weight()

    def _init_weight(self):
        # Kaiming-init convs; the BatchNorm2d branch never fires here since
        # this module only contains GroupNorm (kept for parity).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, input_0):
        # NOTE(review): primals_1 is bound to the conv weight and primals_5
        # to the input, but call() runs its *first* argument through the GCT
        # kernels and uses primals_5 as the convolution weight. The shapes
        # coincide ((4, 4, 4, 4) both) so no assert trips -- confirm this
        # wiring against the inductor generator before relying on it.
        primals_2 = self.GCT.alpha
        primals_3 = self.GCT.gamma
        primals_4 = self.GCT.beta
        primals_1 = self.atrous_conv.weight
        primals_6 = self.bn.weight
        primals_7 = self.bn.bias
        primals_5 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
|
yoxu515/CFBI
|
_ASPPModule
| false
| 16,847
|
[
"BSD-3-Clause"
] | 312
|
0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
https://github.com/yoxu515/CFBI/tree/0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
Gaussian
|
import torch
from torch import Tensor
import torch.utils.tensorboard
import torch.utils.data
class Gaussian(torch.nn.Module):
    """Gaussian activation: f(x) = exp(-x^2), applied elementwise."""

    def forward(self, x: 'Tensor') ->Tensor:
        neg_square = -x * x
        return neg_square.exp()
def get_inputs():
    # A single random 4x4x4x4 tensor for the activation.
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    # Gaussian takes no constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.tensorboard
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise Gaussian activation exp(-x * x) over all 256 elements of
    # the flattened (4, 4, 4, 4) input.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -tmp0
    tmp2 = tmp1 * tmp0
    tmp3 = tl_math.exp(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
    # Driver for GaussianNew: allocate the output buffer and launch the
    # fused exp(-x * x) kernel on CUDA device 0.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK
            =256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class GaussianNew(torch.nn.Module):
    """Gaussian activation exp(-x*x), computed by the fused Triton kernel."""

    def forward(self, input_0):
        # call() returns a one-element tuple holding the activated tensor.
        result, = call([input_0])
        return result
|
yangyinuo823/torchani
|
Gaussian
| false
| 16,848
|
[
"MIT"
] | 305
|
b0cd62eda59829d197b3c37f2215ba1af64f1c8d
|
https://github.com/yangyinuo823/torchani/tree/b0cd62eda59829d197b3c37f2215ba1af64f1c8d
|
waspIntrinsicComposer
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class waspIntrinsicComposer(nn.Module):
    """Compose an image as shading * albedo (intrinsic-image model).

    The shading map is tiled ``nc`` times along the channel axis before the
    elementwise product with the albedo. The tiled shading and composed
    image are kept on the instance (``self.shading`` / ``self.img``).
    """

    def __init__(self, opt):
        super(waspIntrinsicComposer, self).__init__()
        self.ngpu = opt.ngpu
        self.nc = opt.nc

    def forward(self, shading, albedo):
        tiled = shading.repeat(1, self.nc, 1, 1)
        self.shading = tiled
        self.img = tiled * albedo
        return self.img
def get_inputs():
    # Shading with 16 channels (tiled x4 -> 64) and a 64-channel albedo.
    shading = torch.rand([4, 16, 4, 4])
    albedo = torch.rand([4, 64, 4, 4])
    return [shading, albedo]
def get_init_inputs():
    # _mock_config stands in for the argparse options object.
    opt = _mock_config(ngpu=False, nc=4)
    return [[], {'opt': opt}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Fused channel-repeat + multiply: out_ptr0 receives the shading tiled
    # from 16 to 64 channels (x1 % 16 selects the source channel), and
    # out_ptr1 the product tiled_shading * albedo.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16 % 64
    x2 = xindex // 1024
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * (x1 % 16) + 256 * x2), None)
    tmp1 = tl.load(in_ptr1 + x3, None)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x3, tmp0, None)
    tl.store(out_ptr1 + x3, tmp2, None)
def call(args):
    # Driver for waspIntrinsicComposerNew: allocates the tiled-shading and
    # product buffers and launches the fused repeat+mul kernel.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 16, 4, 4), (256, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 64, 4, 4), (1024, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
            float32)
        buf1 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
            float32)
        get_raw_stream(0)
        triton_poi_fused_mul_repeat_0[grid(4096)](arg0_1, arg1_1, buf0,
            buf1, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    # (product, tiled shading)
    return buf1, buf0
class waspIntrinsicComposerNew(nn.Module):
    """Triton-backed intrinsic composer; dispatches to the generated call()."""

    def __init__(self, opt):
        super(waspIntrinsicComposerNew, self).__init__()
        self.ngpu = opt.ngpu
        self.nc = opt.nc

    def forward(self, input_0, input_1):
        # call() returns (composed image, tiled shading); expose the image.
        result = call([input_0, input_1])
        return result[0]
|
zhixinshu/DeformingAutoencoders-pytorch
|
waspIntrinsicComposer
| false
| 16,849
|
[
"BSD-2-Clause"
] | 112
|
72996c5d11ae25dd0051bb51df353fef88e65742
|
https://github.com/zhixinshu/DeformingAutoencoders-pytorch/tree/72996c5d11ae25dd0051bb51df353fef88e65742
|
VGG16
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class VGG16(nn.Module):
def __init__(self, conv5_dilation=1):
super(VGG16, self).__init__()
None
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.not_training = []
self.from_scratch_layers = []
def forward(self, x):
x = F.relu(self.conv1_1(x))
x = F.relu(self.conv1_2(x))
x = self.pool1(x)
x = F.relu(self.conv2_1(x))
x = F.relu(self.conv2_2(x))
x = self.pool2(x)
x = F.relu(self.conv3_1(x))
x = F.relu(self.conv3_2(x))
x = F.relu(self.conv3_3(x))
x = self.pool3(x)
x = F.relu(self.conv4_1(x))
x = F.relu(self.conv4_2(x))
x = F.relu(self.conv4_3(x))
x = F.relu(self.conv5_1(x))
x = F.relu(self.conv5_2(x))
x = F.relu(self.conv5_3(x))
return x
@property
def out_channel(self):
return 512
def get_inputs():
    # One RGB image batch: 4 samples, 3 channels, 64x64 pixels.
    image = torch.rand([4, 3, 64, 64])
    return [image]
def get_init_inputs():
    # VGG16 defaults are sufficient; no constructor arguments.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack the (64, 3, 3, 3) conv1_1 weight from NCHW-contiguous strides
    # (27, 9, 3, 1) into a channels-last layout (input channel innermost:
    # y0 + 3 * x2 + 27 * y1).
    ynumel = 192
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack the (4, 3, 64, 64) input image into a channels-last layout
    # (3 channels innermost: y0 + 3 * x2 + 12288 * y1).
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a conv weight with 64 input channels (9-element 3x3 filters)
    # into channels-last: y0 + 64 * x2 + 576 * y1.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same channels-last repack as triton_poi_fused_2 (64 input channels);
    # inductor emits one copy per conv weight it transforms.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Channels-last repack for a conv weight with 128 input channels:
    # y0 + 128 * x2 + 1152 * y1.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same 128-input-channel channels-last repack as triton_poi_fused_4.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Channels-last repack for a conv weight with 256 input channels
    # (y0 + 256 * x2 + 2304 * y1). Uses a 3D grid because the y extent
    # exceeds a single launch dimension.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same 256-input-channel channels-last repack as triton_poi_fused_6.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Channels-last repack for a conv weight with 512 input channels:
    # y0 + 512 * x2 + 4608 * y1.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU after a 64-channel convolution
    # (x0 indexes the channel for the per-channel bias).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 3x3 / stride-2 / pad-1 max-pool with argmax over channels-last data:
    # 64 channels, 64x64 -> 32x32. The nine neighbor taps are loaded with
    # -inf outside the image; out_ptr1 records which of the 9 taps won
    # (an int8 index used by the backward pass).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 2048 % 32
    x1 = xindex // 64 % 32
    x0 = xindex % 64
    x5 = xindex // 2048
    x6 = xindex
    # Row/column bounds checks for the (-1, -1) tap of the window.
    tmp0 = -1 + 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-4160 + x0 + 128 * x1 + 8192 * x5), tmp10,
        other=float('-inf'))
    tmp12 = 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 128 * x1 + 8192 * x5), tmp16,
        other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-4032 + x0 + 128 * x1 + 8192 * x5), tmp23,
        other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    # Middle row of the 3x3 window.
    tmp26 = 2 * x2
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + (-64 + x0 + 128 * x1 + 8192 * x5), tmp30,
        other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x5), tmp33, other=
        float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x5), tmp36,
        other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    # Bottom row of the 3x3 window.
    tmp39 = 1 + 2 * x2
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (4032 + x0 + 128 * x1 + 8192 * x5), tmp43,
        other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x5), tmp46,
        other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x5), tmp49,
        other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    # Recompute the argmax (index 0..8) of the nine taps.
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + x6, tmp51, None)
    tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU after a 128-channel convolution.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 3x3 / stride-2 / pad-1 max-pool with argmax over channels-last data:
    # 128 channels, 32x32 -> 16x16. Out-of-image taps read -inf; out_ptr1
    # stores the winning tap index (0..8) for the backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 2048 % 16
    x1 = xindex // 128 % 16
    x0 = xindex % 128
    x5 = xindex // 2048
    x6 = xindex
    # Top row of the 3x3 window (row -1 relative to stride-2 anchor).
    tmp0 = -1 + 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 32, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-4224 + x0 + 256 * x1 + 8192 * x5), tmp10,
        other=float('-inf'))
    tmp12 = 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 256 * x1 + 8192 * x5), tmp16,
        other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-3968 + x0 + 256 * x1 + 8192 * x5), tmp23,
        other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    # Middle row.
    tmp26 = 2 * x2
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + (-128 + x0 + 256 * x1 + 8192 * x5), tmp30,
        other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x5), tmp33, other=
        float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x5), tmp36,
        other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    # Bottom row.
    tmp39 = 1 + 2 * x2
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (3968 + x0 + 256 * x1 + 8192 * x5), tmp43,
        other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x5), tmp46,
        other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x5), tmp49,
        other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    # Argmax (0..8) over the nine taps.
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + x6, tmp51, None)
    tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU after a 256-channel convolution.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 3x3 / stride-2 / pad-1 max-pool with argmax over channels-last data:
    # 256 channels, 16x16 -> 8x8. Out-of-image taps read -inf; out_ptr1
    # stores the winning tap index (0..8) for the backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 2048 % 8
    x1 = xindex // 256 % 8
    x0 = xindex % 256
    x5 = xindex // 2048
    x6 = xindex
    # Top row of the 3x3 window.
    tmp0 = -1 + 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 16, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-4352 + x0 + 512 * x1 + 8192 * x5), tmp10,
        other=float('-inf'))
    tmp12 = 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 512 * x1 + 8192 * x5), tmp16,
        other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-3840 + x0 + 512 * x1 + 8192 * x5), tmp23,
        other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    # Middle row.
    tmp26 = 2 * x2
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + (-256 + x0 + 512 * x1 + 8192 * x5), tmp30,
        other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x5), tmp33, other=
        float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x5), tmp36,
        other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    # Bottom row.
    tmp39 = 1 + 2 * x2
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (3840 + x0 + 512 * x1 + 8192 * x5), tmp43,
        other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x5), tmp46,
        other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x5), tmp49,
        other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    # Argmax (0..8) over the nine taps.
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + x6, tmp51, None)
    tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = yindex // 512
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17,
buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.float32)
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23,
buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_13[grid(262144)](buf27,
primals_11, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_13[grid(262144)](buf29,
primals_13, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_13[grid(262144)](buf31,
primals_15, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.float32)
buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31,
buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_15[grid(131072)](buf35,
primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_15[grid(131072)](buf37,
primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_15[grid(131072)](buf39,
primals_21, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_15[grid(131072)](buf41,
primals_23, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_15[grid(131072)](buf43,
primals_25, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.float32)
buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(2048, 64)
](buf44, primals_27, buf45, buf46, 2048, 64, XBLOCK=32, YBLOCK=
32, num_warps=4, num_stages=1)
del buf44
del primals_27
return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21,
buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35,
buf37, buf39, buf41, buf43, buf46)
class VGG16New(nn.Module):
def __init__(self, conv5_dilation=1):
super(VGG16New, self).__init__()
None
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.not_training = []
self.from_scratch_layers = []
@property
def out_channel(self):
return 512
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv3_3.weight
primals_15 = self.conv3_3.bias
primals_16 = self.conv4_1.weight
primals_17 = self.conv4_1.bias
primals_18 = self.conv4_2.weight
primals_19 = self.conv4_2.bias
primals_20 = self.conv4_3.weight
primals_21 = self.conv4_3.bias
primals_22 = self.conv5_2.weight
primals_23 = self.conv5_2.bias
primals_24 = self.conv5_1.weight
primals_25 = self.conv5_1.bias
primals_26 = self.conv5_3.weight
primals_27 = self.conv5_3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
|
yaoqi-zd/SGAN
|
VGG16
| false
| 16,850
|
[
"MIT"
] | 48
|
43d8a859b03967e2423a73ef1ba332ee71714ba4
|
https://github.com/yaoqi-zd/SGAN/tree/43d8a859b03967e2423a73ef1ba332ee71714ba4
|
BridgeConnection
|
import torch
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
class BridgeConnection(nn.Module):
def __init__(self, in_dim, out_dim, dout_p):
super(BridgeConnection, self).__init__()
self.norm = nn.LayerNorm(in_dim)
self.linear = nn.Linear(in_dim, out_dim)
self.dropout = nn.Dropout(dout_p)
self.activation = nn.ReLU()
def forward(self, x):
x = self.norm(x)
x = self.linear(x)
x = self.dropout(x)
return self.activation(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'dout_p': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4,
primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf5, primals_4
class BridgeConnectionNew(nn.Module):
def __init__(self, in_dim, out_dim, dout_p):
super(BridgeConnectionNew, self).__init__()
self.norm = nn.LayerNorm(in_dim)
self.linear = nn.Linear(in_dim, out_dim)
self.dropout = nn.Dropout(dout_p)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_1 = self.norm.weight
primals_2 = self.norm.bias
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
valterlej/CustomBMT
|
BridgeConnection
| false
| 16,851
|
[
"MIT"
] | 157
|
c9326752d1355c81f845f2caab9c047be76067de
|
https://github.com/valterlej/CustomBMT/tree/c9326752d1355c81f845f2caab9c047be76067de
|
FeatureEmbedder
|
import torch
import numpy as np
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
class FeatureEmbedder(nn.Module):
def __init__(self, d_feat, d_model):
super(FeatureEmbedder, self).__init__()
self.d_model = d_model
self.embedder = nn.Linear(d_feat, d_model)
self.activation = nn.ReLU()
def forward(self, x):
x = self.embedder(x)
x = x * np.sqrt(self.d_model)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_feat': 4, 'd_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_sqrt_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_mul_relu_sqrt_threshold_backward_0[grid(256)](buf1,
primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class FeatureEmbedderNew(nn.Module):
def __init__(self, d_feat, d_model):
super(FeatureEmbedderNew, self).__init__()
self.d_model = d_model
self.embedder = nn.Linear(d_feat, d_model)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_1 = self.embedder.weight
primals_2 = self.embedder.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
valterlej/CustomBMT
|
FeatureEmbedder
| false
| 16,852
|
[
"MIT"
] | 157
|
c9326752d1355c81f845f2caab9c047be76067de
|
https://github.com/valterlej/CustomBMT/tree/c9326752d1355c81f845f2caab9c047be76067de
|
SpatialCGNL
|
import torch
import torch.nn as nn
class SpatialCGNL(nn.Module):
"""Spatial CGNL block with dot production kernel for image classfication.
"""
def __init__(self, inplanes, planes, use_scale=False, groups=8):
self.use_scale = use_scale
self.groups = groups
super(SpatialCGNL, self).__init__()
self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=
False)
self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=
False)
self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=
False)
self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
groups=self.groups, bias=False)
self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)
if self.use_scale:
None
if self.groups:
None
def kernel(self, t, p, g, b, c, h, w):
"""The linear kernel (dot production).
Args:
t: output of conv theata
p: output of conv phi
g: output of conv g
b: batch size
c: channels number
h: height of featuremaps
w: width of featuremaps
"""
t = t.view(b, 1, c * h * w)
p = p.view(b, 1, c * h * w)
g = g.view(b, c * h * w, 1)
att = torch.bmm(p, g)
if self.use_scale:
att = att.div((c * h * w) ** 0.5)
x = torch.bmm(att, t)
x = x.view(b, c, h, w)
return x
def forward(self, x):
residual = x
t = self.t(x)
p = self.p(x)
g = self.g(x)
b, c, h, w = t.size()
if self.groups and self.groups > 1:
_c = c // self.groups
ts = torch.split(t, split_size_or_sections=_c, dim=1)
ps = torch.split(p, split_size_or_sections=_c, dim=1)
gs = torch.split(g, split_size_or_sections=_c, dim=1)
_t_sequences = []
for i in range(self.groups):
_x = self.kernel(ts[i], ps[i], gs[i], b, _c, h, w)
_t_sequences.append(_x)
x = torch.cat(_t_sequences, dim=1)
else:
x = self.kernel(t, p, g, b, c, h, w)
x = self.z(x)
x = self.gn(x) + residual
return x
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {'inplanes': 64, 'planes': 32}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x0 = xindex % 4096
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-4 + x1) + 16384 * x2), tmp9,
other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-8 + x1) + 16384 * x2), tmp14,
other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + 4096 * (-12 + x1) + 16384 * x2), tmp19,
other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr4 + (x0 + 4096 * (-16 + x1) + 16384 * x2), tmp24,
other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr5 + (x0 + 4096 * (-20 + x1) + 16384 * x2), tmp29,
other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 28, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr6 + (x0 + 4096 * (-24 + x1) + 16384 * x2), tmp34,
other=0.0)
tmp36 = tmp0 >= tmp32
tl.full([1], 32, tl.int64)
tmp39 = tl.load(in_ptr7 + (x0 + 4096 * (-28 + x1) + 16384 * x2), tmp36,
other=0.0)
tmp40 = tl.where(tmp34, tmp35, tmp39)
tmp41 = tl.where(tmp29, tmp30, tmp40)
tmp42 = tl.where(tmp24, tmp25, tmp41)
tmp43 = tl.where(tmp19, tmp20, tmp42)
tmp44 = tl.where(tmp14, tmp15, tmp43)
tmp45 = tl.where(tmp9, tmp10, tmp44)
tmp46 = tl.where(tmp4, tmp5, tmp45)
tl.store(out_ptr0 + x3, tmp46, None)
@triton.jit
def triton_red_fused_native_group_norm_1(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tl.store(out_ptr2 + x0, tmp4, xmask)
@triton.jit
def triton_per_fused_native_group_norm_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 32
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 32768.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = xindex // 4096
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x4 // 8, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4 // 8, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp2 = tmp0 - tmp1
tmp4 = 32768.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_transpose_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (114688 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Copy the 16384-element slice starting at offset 98304 of each
    # 131072-element batch row of in_ptr0 into a densely packed output.
    base = tl.program_id(0) * XBLOCK
    idx = base + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    inner = idx % 16384
    batch = idx // 16384
    val = tl.load(in_ptr0 + (98304 + inner + 131072 * batch), None)
    tl.store(out_ptr0 + idx, val, None)
@triton.jit
def triton_poi_fused_transpose_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Copy the 16384-element slice starting at offset 81920 of each
    # 131072-element batch row of in_ptr0 into a densely packed output.
    base = tl.program_id(0) * XBLOCK
    idx = base + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    inner = idx % 16384
    batch = idx // 16384
    val = tl.load(in_ptr0 + (81920 + inner + 131072 * batch), None)
    tl.store(out_ptr0 + idx, val, None)
@triton.jit
def triton_poi_fused_transpose_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Copy the 16384-element slice starting at offset 65536 of each
    # 131072-element batch row of in_ptr0 into a densely packed output.
    base = tl.program_id(0) * XBLOCK
    idx = base + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    inner = idx % 16384
    batch = idx // 16384
    val = tl.load(in_ptr0 + (65536 + inner + 131072 * batch), None)
    tl.store(out_ptr0 + idx, val, None)
@triton.jit
def triton_poi_fused_transpose_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Copy the 16384-element slice starting at offset 49152 of each
    # 131072-element batch row of in_ptr0 into a densely packed output.
    base = tl.program_id(0) * XBLOCK
    idx = base + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    inner = idx % 16384
    batch = idx // 16384
    val = tl.load(in_ptr0 + (49152 + inner + 131072 * batch), None)
    tl.store(out_ptr0 + idx, val, None)
@triton.jit
def triton_poi_fused_transpose_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Copy the 16384-element slice starting at offset 32768 of each
    # 131072-element batch row of in_ptr0 into a densely packed output.
    base = tl.program_id(0) * XBLOCK
    idx = base + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    inner = idx % 16384
    batch = idx // 16384
    val = tl.load(in_ptr0 + (32768 + inner + 131072 * batch), None)
    tl.store(out_ptr0 + idx, val, None)
@triton.jit
def triton_poi_fused_transpose_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Copy the 16384-element slice starting at offset 16384 of each
    # 131072-element batch row of in_ptr0 into a densely packed output.
    base = tl.program_id(0) * XBLOCK
    idx = base + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    inner = idx % 16384
    batch = idx // 16384
    val = tl.load(in_ptr0 + (16384 + inner + 131072 * batch), None)
    tl.store(out_ptr0 + idx, val, None)
@triton.jit
def triton_poi_fused_transpose_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Copy the first 16384-element slice (offset 0) of each 131072-element
    # batch row of in_ptr0 into a densely packed output.
    base = tl.program_id(0) * XBLOCK
    idx = base + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    inner = idx % 16384
    batch = idx // 16384
    val = tl.load(in_ptr0 + (inner + 131072 * batch), None)
    tl.store(out_ptr0 + idx, val, None)
def call(args):
    """Inductor-compiled forward of the SpatialCGNL block.

    args = [input x, t.weight, p.weight, g.weight, z.weight, gn.weight,
    gn.bias].  Computes the grouped dot-product attention (8 groups, each
    a 16384-wide slice), concatenates, applies the grouped 1x1 conv z,
    GroupNorm and the residual add.  Returns the block output (buf28)
    followed by saved tensors for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
    assert_size_stride(primals_2, (32, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_3, (32, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_5, (64, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_6, (64,), (1,))
    assert_size_stride(primals_7, (64,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # theta / phi / g projections: three 1x1 convolutions of the input.
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
        # Per-group attention: for each of 8 groups (offsets 0, 16384, ...,
        # 114688), att = bmm(phi, g) is a scalar per sample, then
        # x = bmm(att, theta).  buf3/buf4 handle group 0, buf5/buf6 group 1,
        # and so on up to buf17/buf18 for group 7.
        buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 0), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1,
            0), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf3, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 16384), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
            1, 0), 16384), out=buf5)
        buf6 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf5, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 16384), out=buf6)
        buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 32768), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
            1, 0), 32768), out=buf7)
        buf8 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf7, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 32768), out=buf8)
        buf9 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 49152), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
            1, 0), 49152), out=buf9)
        buf10 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf9, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 49152), out=buf10)
        buf11 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 65536), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
            1, 0), 65536), out=buf11)
        buf12 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf11, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 65536), out=buf12)
        buf13 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 81920), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
            1, 0), 81920), out=buf13)
        buf14 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf13, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 81920), out=buf14)
        buf15 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 98304), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
            1, 0), 98304), out=buf15)
        buf16 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf15, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 98304), out=buf16)
        buf17 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
            0, 1), 114688), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
            1, 0), 114688), out=buf17)
        buf18 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        extern_kernels.bmm(buf17, reinterpret_tensor(buf0, (4, 1, 16384), (
            131072, 0, 1), 114688), out=buf18)
        # Concatenate the 8 group results back into a (4, 32, 64, 64) map.
        buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(524288)](buf4, buf6, buf8, buf10, buf12,
            buf14, buf16, buf18, buf19, 524288, XBLOCK=1024, num_warps=4,
            num_stages=1)
        # Grouped 1x1 conv z maps back to the input channel count.
        buf20 = extern_kernels.convolution(buf19, primals_5, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=8, bias=None)
        assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1))
        # GroupNorm statistics in two stages (partial Welford reduction,
        # then final combine), followed by normalize + affine + residual.
        buf21 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1),
            torch.float32)
        buf22 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1),
            torch.float32)
        buf23 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1),
            torch.float32)
        triton_red_fused_native_group_norm_1[grid(128)](buf20, buf21, buf22,
            buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
            )
        buf24 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
        buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
        buf27 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
        triton_per_fused_native_group_norm_2[grid(32)](buf21, buf22, buf23,
            buf24, buf25, buf27, 32, 4, XBLOCK=32, num_warps=2, num_stages=1)
        del buf21
        del buf22
        del buf23
        buf28 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_add_native_group_norm_3[grid(1048576)](buf20,
            buf24, buf25, primals_6, primals_7, primals_1, buf28, 1048576,
            XBLOCK=1024, num_warps=4, num_stages=1)
        del buf25
        del primals_7
        # Everything below prepares transposed per-group views of the three
        # projections, saved for the backward pass (reusing freed buffers
        # where possible).
        buf29 = reinterpret_tensor(buf8, (4, 16384, 1), (16384, 1, 16384), 0)
        del buf8
        triton_poi_fused_transpose_4[grid(65536)](buf0, buf29, 65536,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf30 = reinterpret_tensor(buf6, (4, 16384, 1), (16384, 1, 16384), 0)
        del buf6
        triton_poi_fused_transpose_4[grid(65536)](buf1, buf30, 65536,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf31 = buf4
        del buf4
        triton_poi_fused_transpose_4[grid(65536)](buf2, buf31, 65536,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf32 = reinterpret_tensor(buf18, (4, 16384, 1), (16384, 1, 16384), 0)
        del buf18
        triton_poi_fused_transpose_5[grid(65536)](buf0, buf32, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf33 = reinterpret_tensor(buf16, (4, 16384, 1), (16384, 1, 16384), 0)
        del buf16
        triton_poi_fused_transpose_5[grid(65536)](buf1, buf33, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf34 = buf14
        del buf14
        triton_poi_fused_transpose_5[grid(65536)](buf2, buf34, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf35 = reinterpret_tensor(buf12, (4, 16384, 1), (16384, 1, 16384), 0)
        del buf12
        triton_poi_fused_transpose_6[grid(65536)](buf0, buf35, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf36 = reinterpret_tensor(buf10, (4, 16384, 1), (16384, 1, 16384), 0)
        del buf10
        triton_poi_fused_transpose_6[grid(65536)](buf1, buf36, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf37 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        triton_poi_fused_transpose_6[grid(65536)](buf2, buf37, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf38 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_7[grid(65536)](buf0, buf38, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf39 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_7[grid(65536)](buf1, buf39, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf40 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        triton_poi_fused_transpose_7[grid(65536)](buf2, buf40, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf41 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_8[grid(65536)](buf0, buf41, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf42 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_8[grid(65536)](buf1, buf42, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf43 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        triton_poi_fused_transpose_8[grid(65536)](buf2, buf43, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf44 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_9[grid(65536)](buf0, buf44, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf45 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_9[grid(65536)](buf1, buf45, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf46 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        triton_poi_fused_transpose_9[grid(65536)](buf2, buf46, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf47 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_10[grid(65536)](buf0, buf47, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf48 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_10[grid(65536)](buf1, buf48, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf49 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        triton_poi_fused_transpose_10[grid(65536)](buf2, buf49, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf50 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_11[grid(65536)](buf0, buf50, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        del buf0
        buf51 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
            float32)
        triton_poi_fused_transpose_11[grid(65536)](buf1, buf51, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        del buf1
        buf52 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
            float32)
        triton_poi_fused_transpose_11[grid(65536)](buf2, buf52, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        del buf2
    return (buf28, primals_1, primals_2, primals_3, primals_4, primals_5,
        primals_6, buf19, buf20, reinterpret_tensor(buf24, (4, 8), (8, 1),
        0), reinterpret_tensor(buf27, (4, 8), (8, 1), 0), buf17, buf29,
        buf30, buf31, buf15, buf32, buf33, buf34, buf13, buf35, buf36,
        buf37, buf11, buf38, buf39, buf40, buf9, buf41, buf42, buf43, buf7,
        buf44, buf45, buf46, buf5, buf47, buf48, buf49, buf3, buf50, buf51,
        buf52)
class SpatialCGNLNew(nn.Module):
    """Spatial CGNL block with a dot-production kernel for image
    classification; forward runs the Triton-compiled graph.
    """

    def __init__(self, inplanes, planes, use_scale=False, groups=8):
        self.use_scale = use_scale
        self.groups = groups
        super(SpatialCGNLNew, self).__init__()
        # theta / phi / g projections: 1x1 convolutions without bias.
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1,
            bias=False)
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1,
            bias=False)
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1,
            bias=False)
        # Grouped 1x1 conv back to the input width, then GroupNorm.
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
            groups=self.groups, bias=False)
        self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)
        # The reference implementation logged scale/groups settings here;
        # both statements were stripped to no-ops, so nothing is done.

    def kernel(self, t, p, g, b, c, h, w):
        """The linear kernel (dot production).

        Args:
            t: output of conv theta
            p: output of conv phi
            g: output of conv g
            b: batch size
            c: channels number
            h: height of featuremaps
            w: width of featuremaps
        """
        n = c * h * w
        theta = t.view(b, 1, n)
        phi = p.view(b, 1, n)
        gee = g.view(b, n, 1)
        # att is a single scalar per sample: <phi, g>.
        att = torch.bmm(phi, gee)
        if self.use_scale:
            att = att.div(n ** 0.5)
        return torch.bmm(att, theta).view(b, c, h, w)

    def forward(self, input_0):
        # Parameter order must match call()'s primals_1..primals_7 layout;
        # the first returned buffer is the block output.
        params = [input_0, self.t.weight, self.p.weight, self.g.weight,
            self.z.weight, self.gn.weight, self.gn.bias]
        return call(params)[0]
|
zj1008/GALD-DGCNet
|
SpatialCGNL
| false
| 16,853
|
[
"MIT"
] | 127
|
be7ebfe2b3d28ea28a2b4714852999d4af2a785e
|
https://github.com/zj1008/GALD-DGCNet/tree/be7ebfe2b3d28ea28a2b4714852999d4af2a785e
|
MultiheadedAttention
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
def attention(Q, K, V, mask, dropout=None):
    """Scaled dot-product attention; positions where mask == 0 are blocked."""
    scale = np.sqrt(Q.size(-1))
    scores = Q.matmul(K.transpose(-1, -2)) / scale
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -float('inf'))
    weights = F.softmax(scores, dim=-1)
    result = weights.matmul(V)
    return result if dropout is None else dropout(result)
class MultiheadedAttention(nn.Module):
    """Multi-headed attention with separate input projections for Q, K, V."""

    def __init__(self, d_model_Q, d_model_K, d_model_V, H, dout_p=0.0,
        d_model=None):
        super(MultiheadedAttention, self).__init__()
        self.d_model_Q = d_model_Q
        self.d_model_K = d_model_K
        self.d_model_V = d_model_V
        self.H = H
        self.d_model = d_model
        self.dout_p = dout_p
        if self.d_model is None:
            # Fall back to the query width when no model size is given.
            self.d_model = self.d_model_Q
        self.d_k = self.d_model // H
        self.linear_Q2d = nn.Linear(self.d_model_Q, self.d_model)
        self.linear_K2d = nn.Linear(self.d_model_K, self.d_model)
        self.linear_V2d = nn.Linear(self.d_model_V, self.d_model)
        self.linear_d2Q = nn.Linear(self.d_model, self.d_model_Q)
        self.dropout = nn.Dropout(self.dout_p)
        assert self.d_model % H == 0

    def forward(self, Q, K, V, mask):
        """
        Q, K, V: (B, Sq, Dq), (B, Sk, Dk), (B, Sv, Dv)
        mask: (B, 1, Sk)
        Sk = Sv,
        Dk != self.d_k
        Also: m1 is the target modality (queries); m2 is the source modality (keys, values)
        """
        B, Sq, _ = Q.shape

        def split_heads(x):
            # (B, S, d_model) -> (B, H, S, d_k)
            return x.view(B, -1, self.H, self.d_k).transpose(-3, -2)
        Q = split_heads(self.linear_Q2d(Q))
        K = split_heads(self.linear_K2d(K))
        V = split_heads(self.linear_V2d(V))
        if mask is not None:
            # Broadcast the mask over the head dimension.
            mask = mask.unsqueeze(1)
        out = attention(Q, K, V, mask, self.dropout)
        out = out.transpose(-3, -2).contiguous().view(B, Sq, self.d_model)
        return self.linear_d2Q(out)
def get_inputs():
    # Four random (4, 4, 4) tensors: Q, K, V and the attention mask.
    return [torch.rand(4, 4, 4) for _ in range(4)]
def get_init_inputs():
    # Positional args are empty; all constructor params go by keyword.
    kwargs = dict(d_model_Q=4, d_model_K=4, d_model_V=4, H=4)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Fused bias-add + layout permute: adds the per-feature bias (in_ptr1)
    # to the projected matrix (in_ptr0) while writing it out in the
    # head-split layout used by the subsequent bmm calls.
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    # y0 selects the feature (bias index), y1 the batch slice.
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise `mask == 0` test producing the boolean blocking mask.
    xnumel = 64
    start = tl.program_id(0) * XBLOCK
    offsets = start + tl.arange(0, XBLOCK)[:]
    valid = offsets < xnumel
    vals = tl.load(in_ptr0 + offsets, valid)
    is_zero = vals == 0.0
    tl.store(out_ptr0 + offsets, is_zero, valid)
@triton.jit
def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # First pass of a masked softmax over rows of length 4: applies the
    # boolean mask (in_ptr0, True -> -inf) to the scores (in_ptr1), then
    # writes the per-row max (out_ptr0) and the per-row sum of
    # exp(score - max) (out_ptr1) for the normalization pass.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    # Load the 4 mask bits and 4 score values of this row.
    tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
        ='evict_last').to(tl.int1)
    tmp5 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy
        ='evict_last').to(tl.int1)
    tmp9 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
        eviction_policy='evict_last').to(tl.int1)
    tmp13 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    # Masked positions are replaced by -inf before the max/sum.
    tmp2 = float('-inf')
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp6 = tl.where(tmp4, tmp2, tmp5)
    tmp7 = triton_helpers.maximum(tmp3, tmp6)
    tmp10 = tl.where(tmp8, tmp2, tmp9)
    tmp11 = triton_helpers.maximum(tmp7, tmp10)
    tmp14 = tl.where(tmp12, tmp2, tmp13)
    tmp15 = triton_helpers.maximum(tmp11, tmp14)
    # Numerically stable exp-sum: subtract the row max first.
    tmp16 = tmp3 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp6 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp10 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp14 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tl.store(out_ptr0 + x3, tmp15, xmask)
    tl.store(out_ptr1 + x3, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    # Second (normalization) pass of the masked softmax: re-applies the
    # mask to the scores held in in_out_ptr0 and rewrites them in place as
    # exp(score - rowmax) / rowsum, using the per-row max (in_ptr1) and
    # exp-sum (in_ptr2) produced by the first pass.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex // 64
    x4 = xindex % 16
    x5 = xindex
    x6 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp1 = tl.load(in_out_ptr0 + x5, xmask)
    tmp4 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
    tmp2 = float('-inf')
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp5 = tmp3 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 / tmp7
    tl.store(in_out_ptr0 + x5, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Pure transpose-copy: merges the per-head outputs back into the
    # contiguous (B, Sq, d_model) layout (no bias added here).
    ynumel = 16
    xnumel = 4
    ybase = tl.program_id(1) * YBLOCK
    yidx = ybase + tl.arange(0, YBLOCK)[None, :]
    ymask = yidx < ynumel
    xbase = tl.program_id(0) * XBLOCK
    xidx = xbase + tl.arange(0, XBLOCK)[:, None]
    xmask = xidx < xnumel
    y0 = yidx % 4
    y1 = yidx // 4
    src = tl.load(in_ptr0 + (y0 + 4 * xidx + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (xidx + 4 * yidx), src, xmask & ymask)
def call(args):
    """Inductor-compiled forward of MultiheadedAttention.

    args = [Q, Q2d.weight, Q2d.bias, K2d.weight, K2d.bias, K, V2d.weight,
    V2d.bias, V, mask, d2Q.weight, d2Q.bias].  Projects Q/K/V, computes
    the masked softmax attention with fused Triton kernels, applies the
    output projection, and returns the result plus saved tensors for the
    backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Input projections (weight-only matmuls; biases are fused into
        # the clone kernels below).
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
        del primals_7
        # Bias-add + head-split for Q and K, then the Q @ K^T scores.
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf0
        triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
        # Boolean blocking mask (mask == 0), then the two-pass masked
        # softmax over the scores in buf5 (normalized in place as buf9).
        buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
        triton_poi_fused_eq_1[grid(64)](primals_10, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_10
        buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf1
        buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_masked_fill_2[grid(64)](buf6, buf5, buf7,
            buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        triton_poi_fused__softmax_masked_fill_3[grid(256)](buf9, buf6, buf7,
            buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
        # Bias-add + head-split for V, then weights @ V.
        buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf8
        triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf10, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_8
        buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
        # Merge heads back and apply the output projection (addmm = W x + b).
        buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf7
        triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
        del buf11
        extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf13)
        del primals_12
    return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
        ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
        ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
        ), buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0
        ), primals_11, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
def attention(Q, K, V, mask, dropout=None):
    """Scaled dot-product attention.

    Scores are Q K^T / sqrt(d_k); positions where mask == 0 receive -inf
    before the softmax, and dropout (when given) is applied to the output.
    """
    d_k = Q.size(-1)
    logits = Q.matmul(K.transpose(-1, -2)) / np.sqrt(d_k)
    if mask is not None:
        logits = logits.masked_fill(mask == 0, -float('inf'))
    probs = F.softmax(logits, dim=-1)
    out = probs.matmul(V)
    if dropout is not None:
        out = dropout(out)
    return out
class MultiheadedAttentionNew(nn.Module):
    """Multi-headed attention whose forward pass runs the fused Triton graph."""

    def __init__(self, d_model_Q, d_model_K, d_model_V, H, dout_p=0.0,
        d_model=None):
        super(MultiheadedAttentionNew, self).__init__()
        self.d_model_Q = d_model_Q
        self.d_model_K = d_model_K
        self.d_model_V = d_model_V
        self.H = H
        self.d_model = d_model
        self.dout_p = dout_p
        if self.d_model is None:
            # Fall back to the query width when no model size is given.
            self.d_model = self.d_model_Q
        self.d_k = self.d_model // H
        self.linear_Q2d = nn.Linear(self.d_model_Q, self.d_model)
        self.linear_K2d = nn.Linear(self.d_model_K, self.d_model)
        self.linear_V2d = nn.Linear(self.d_model_V, self.d_model)
        self.linear_d2Q = nn.Linear(self.d_model, self.d_model_Q)
        self.dropout = nn.Dropout(self.dout_p)
        assert self.d_model % H == 0

    def forward(self, input_0, input_1, input_2, input_3):
        # input_0..input_3 are Q, K, V, mask; the order below must match
        # call()'s primals_1..primals_12 layout.
        params = [input_0, self.linear_Q2d.weight, self.linear_Q2d.bias,
            self.linear_K2d.weight, self.linear_K2d.bias, input_1,
            self.linear_V2d.weight, self.linear_V2d.bias, input_2,
            input_3, self.linear_d2Q.weight, self.linear_d2Q.bias]
        return call(params)[0]
|
valterlej/CustomBMT
|
MultiheadedAttention
| false
| 16,854
|
[
"MIT"
] | 157
|
c9326752d1355c81f845f2caab9c047be76067de
|
https://github.com/valterlej/CustomBMT/tree/c9326752d1355c81f845f2caab9c047be76067de
|
SinkhornDistance
|
import torch
import torch.utils.data
class SinkhornDistance(torch.nn.Module):
    """
    Entropy-regularized optimal-transport cost between two empirical
    measures, approximated with Sinkhorn iterations in the log domain.

    Args:
        eps (float): regularization coefficient
        max_iter (int): maximum number of Sinkhorn iterations
        reduction (string, optional): 'none' | 'mean' | 'sum' reduction of
            the output (stored but not applied here). Default: 'none'
    Shape:
        - Input: :math:`(N, P_1, D_1)`, :math:`(N, P_2, D_2)`
        - Output: :math:`(N)` or :math:`()`, depending on `reduction`
    """

    def __init__(self, eps=0.001, max_iter=100, reduction='none'):
        super(SinkhornDistance, self).__init__()
        self.eps = eps
        self.max_iter = max_iter
        self.reduction = reduction

    def forward(self, mu, nu, C):
        # Dual potentials initialized at one (matching the reference).
        u = torch.ones_like(mu)
        v = torch.ones_like(nu)
        for _ in range(self.max_iter):
            # Update v with the current u, then u with the fresh v.
            lse_v = torch.logsumexp(self.M(C, u, v).transpose(-2, -1), dim=-1)
            v = v + self.eps * (torch.log(nu + 1e-08) - lse_v)
            lse_u = torch.logsumexp(self.M(C, u, v), dim=-1)
            u = u + self.eps * (torch.log(mu + 1e-08) - lse_u)
        # Transport plan and its cost; the plan is detached from autograd.
        pi = torch.exp(self.M(C, u, v)).detach()
        cost = torch.sum(pi * C, dim=(-2, -1))
        return cost, pi

    def M(self, C, u, v):
        """
        "Modified cost for logarithmic updates"
        "$M_{ij} = (-c_{ij} + u_i + v_j) / epsilon$"
        """
        return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps
def get_inputs():
    # Three random (4, 4, 4, 4) tensors: mu, nu and the cost matrix C.
    return [torch.rand(4, 4, 4, 4) for _ in range(3)]
def get_init_inputs():
    # No positional or keyword constructor arguments are needed.
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_logsumexp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # First stage of the stabilized logsumexp over the transposed modified
    # cost M(C, u, v).T for the initial Sinkhorn iteration.  The constants
    # are baked in from u = v = 1 (the two `+ 1.0` additions) and
    # eps = 0.001 (the `* 1000.0` scale).  Each output element is the
    # M value shifted by the inf-guarded column maximum; the exp/sum/log
    # is finished by triton_poi_fused_logsumexp_1.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x0 = xindex % 4
    x2 = xindex // 16 % 16
    x5 = xindex
    # tmp0 is the element itself; tmp7/12/18/24 are the 4 entries of its
    # reduction column, used to form the running maximum.
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp18 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    # M = (-C + u + v) / eps with u = v = 1.0 and 1/eps = 1000.0.
    tmp1 = -tmp0
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 + tmp2
    tmp5 = 1000.0
    tmp6 = tmp4 * tmp5
    tmp8 = -tmp7
    tmp9 = tmp8 + tmp2
    tmp10 = tmp9 + tmp2
    tmp11 = tmp10 * tmp5
    tmp13 = -tmp12
    tmp14 = tmp13 + tmp2
    tmp15 = tmp14 + tmp2
    tmp16 = tmp15 * tmp5
    tmp17 = triton_helpers.maximum(tmp11, tmp16)
    tmp19 = -tmp18
    tmp20 = tmp19 + tmp2
    tmp21 = tmp20 + tmp2
    tmp22 = tmp21 * tmp5
    tmp23 = triton_helpers.maximum(tmp17, tmp22)
    tmp25 = -tmp24
    tmp26 = tmp25 + tmp2
    tmp27 = tmp26 + tmp2
    tmp28 = tmp27 * tmp5
    tmp29 = triton_helpers.maximum(tmp23, tmp28)
    # Guard against an all-(-inf)/inf column: a non-finite max is replaced
    # by 0 so the shift below stays well defined (logsumexp convention).
    tmp30 = tl_math.abs(tmp29)
    tmp31 = float('inf')
    tmp32 = tmp30 == tmp31
    tmp33 = 0.0
    tmp34 = tl.where(tmp32, tmp33, tmp29)
    tmp35 = tmp6 - tmp34
    tl.store(out_ptr0 + x5, tmp35, xmask)
@triton.jit
def triton_poi_fused_logsumexp_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Second stage of the stabilized logsumexp: sums exp() of the shifted
    # values produced by triton_poi_fused_logsumexp_0 (in_ptr0), takes the
    # log, and adds back the inf-guarded maximum recomputed from the raw
    # cost (in_ptr1, with the same u = v = 1, eps = 0.001 constants).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x3 = xindex // 4
    x1 = xindex // 4 % 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x3), xmask)
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x3), xmask)
    tmp5 = tl.load(in_ptr0 + (8 + x0 + 16 * x3), xmask)
    tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x3), xmask)
    tmp12 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp25 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp31 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    # log(sum(exp(shifted values))) over the 4-element reduction column.
    tmp1 = tl_math.exp(tmp0)
    tmp3 = tl_math.exp(tmp2)
    tmp4 = tmp1 + tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tmp4 + tmp6
    tmp9 = tl_math.exp(tmp8)
    tmp10 = tmp7 + tmp9
    tmp11 = tl_math.log(tmp10)
    # Recompute the column maximum of M = ((-C + 1) + 1) * 1000.
    tmp13 = -tmp12
    tmp14 = 1.0
    tmp15 = tmp13 + tmp14
    tmp16 = tmp15 + tmp14
    tmp17 = 1000.0
    tmp18 = tmp16 * tmp17
    tmp20 = -tmp19
    tmp21 = tmp20 + tmp14
    tmp22 = tmp21 + tmp14
    tmp23 = tmp22 * tmp17
    tmp24 = triton_helpers.maximum(tmp18, tmp23)
    tmp26 = -tmp25
    tmp27 = tmp26 + tmp14
    tmp28 = tmp27 + tmp14
    tmp29 = tmp28 * tmp17
    tmp30 = triton_helpers.maximum(tmp24, tmp29)
    tmp32 = -tmp31
    tmp33 = tmp32 + tmp14
    tmp34 = tmp33 + tmp14
    tmp35 = tmp34 * tmp17
    tmp36 = triton_helpers.maximum(tmp30, tmp35)
    # Same non-finite-max guard as the first stage, then add the shift back.
    tmp37 = tl_math.abs(tmp36)
    tmp38 = float('inf')
    tmp39 = tmp37 == tmp38
    tmp40 = 0.0
    tmp41 = tl.where(tmp39, tmp40, tmp36)
    tmp42 = tmp11 + tmp41
    tl.store(out_ptr0 + x4, tmp42, xmask)
@triton.jit
def triton_poi_fused_add_div_logsumexp_neg_2(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: both halves of a stabilized logsumexp.

    For k = 0..3 it forms the scaled term
        t_k = ((1 - a_k) + ((log(b_k + 1e-08) - c_k) * 0.001 + 1)) * 1000.0
    then writes the running max of t_k (with the non-finite guard) to
    out_ptr0 and sum_k exp(t_k - max) to out_ptr1 for a later log().
    NOTE(review): 1000.0 / 0.001 look like a scale and its inverse (eps of an
    entropic scheme) -- confirm in the originating module.  Generated code.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex % 64
    x4 = xindex // 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 4 * x4, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    tmp19 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp29 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    tmp32 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp35 = tl.load(in_ptr2 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp42 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    tmp45 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp48 = tl.load(in_ptr2 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp1 = -tmp0
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    # 1e-08 keeps log() away from log(0).
    tmp5 = 1e-08
    tmp6 = tmp4 + tmp5
    tmp7 = tl_math.log(tmp6)
    tmp9 = tmp7 - tmp8
    tmp10 = 0.001
    tmp11 = tmp9 * tmp10
    tmp12 = tmp11 + tmp2
    tmp13 = tmp3 + tmp12
    tmp14 = 1000.0
    tmp15 = tmp13 * tmp14
    tmp17 = -tmp16
    tmp18 = tmp17 + tmp2
    tmp20 = tmp19 + tmp5
    tmp21 = tl_math.log(tmp20)
    tmp23 = tmp21 - tmp22
    tmp24 = tmp23 * tmp10
    tmp25 = tmp24 + tmp2
    tmp26 = tmp18 + tmp25
    tmp27 = tmp26 * tmp14
    tmp28 = triton_helpers.maximum(tmp15, tmp27)
    tmp30 = -tmp29
    tmp31 = tmp30 + tmp2
    tmp33 = tmp32 + tmp5
    tmp34 = tl_math.log(tmp33)
    tmp36 = tmp34 - tmp35
    tmp37 = tmp36 * tmp10
    tmp38 = tmp37 + tmp2
    tmp39 = tmp31 + tmp38
    tmp40 = tmp39 * tmp14
    tmp41 = triton_helpers.maximum(tmp28, tmp40)
    tmp43 = -tmp42
    tmp44 = tmp43 + tmp2
    tmp46 = tmp45 + tmp5
    tmp47 = tl_math.log(tmp46)
    tmp49 = tmp47 - tmp48
    tmp50 = tmp49 * tmp10
    tmp51 = tmp50 + tmp2
    tmp52 = tmp44 + tmp51
    tmp53 = tmp52 * tmp14
    tmp54 = triton_helpers.maximum(tmp41, tmp53)
    # logsumexp guard: a non-finite max is replaced by 0 before shifting.
    tmp55 = tl_math.abs(tmp54)
    tmp56 = float('inf')
    tmp57 = tmp55 == tmp56
    tmp58 = 0.0
    tmp59 = tl.where(tmp57, tmp58, tmp54)
    tmp60 = tmp15 - tmp59
    tmp61 = tl_math.exp(tmp60)
    tmp62 = tmp27 - tmp59
    tmp63 = tl_math.exp(tmp62)
    tmp64 = tmp61 + tmp63
    tmp65 = tmp40 - tmp59
    tmp66 = tl_math.exp(tmp65)
    tmp67 = tmp64 + tmp66
    tmp68 = tmp53 - tmp59
    tmp69 = tl_math.exp(tmp68)
    tmp70 = tmp67 + tmp69
    tl.store(out_ptr0 + x5, tmp54, xmask)
    tl.store(out_ptr1 + x5, tmp70, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: build the scaled argument of the next
    logsumexp.

    Per element:
        out = (-a + ((log(b + 1e-08) - (log(c) + guarded(d))) * 0.001 + 1)
                  + ((log(e + 1e-08) - f) * 0.001 + 1)) * 1000.0
    where guarded(d) replaces a non-finite upstream logsumexp max with 0.
    Machine-generated: do not hand-edit.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x5 = xindex // 4
    x0 = xindex % 4
    x6 = xindex // 16
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr4 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp24 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp1 = -tmp0
    tmp3 = 1e-08
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.log(tmp4)
    tmp7 = tl_math.log(tmp6)
    # Re-apply the upstream logsumexp's non-finite-max guard.
    tmp9 = tl_math.abs(tmp8)
    tmp10 = float('inf')
    tmp11 = tmp9 == tmp10
    tmp12 = 0.0
    tmp13 = tl.where(tmp11, tmp12, tmp8)
    tmp14 = tmp7 + tmp13
    tmp15 = tmp5 - tmp14
    tmp16 = 0.001
    tmp17 = tmp15 * tmp16
    tmp18 = 1.0
    tmp19 = tmp17 + tmp18
    tmp20 = tmp1 + tmp19
    tmp22 = tmp21 + tmp3
    tmp23 = tl_math.log(tmp22)
    tmp25 = tmp23 - tmp24
    tmp26 = tmp25 * tmp16
    tmp27 = tmp26 + tmp18
    tmp28 = tmp20 + tmp27
    tmp29 = 1000.0
    tmp30 = tmp28 * tmp29
    tl.store(out_ptr0 + x7, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_4(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: log-ratio update.

    Per element: out = (log(a + 1e-08) - logsumexp_k(b_k)) * 0.001, where the
    logsumexp over four strided elements of in_ptr1 is computed inline with
    the standard non-finite-max guard.  Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
    tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
    tmp9 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    # Inline stabilized logsumexp over the four elements of in_ptr1.
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp10 = triton_helpers.maximum(tmp8, tmp9)
    tmp11 = tl_math.abs(tmp10)
    tmp12 = float('inf')
    tmp13 = tmp11 == tmp12
    tmp14 = 0.0
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = tmp4 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp5 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp9 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp27 + tmp15
    tmp29 = tmp3 - tmp28
    tmp30 = 0.001
    tmp31 = tmp29 * tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_neg_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: accumulate log-ratio terms (unscaled).

    Per element:
        out = (-a + ((log(b + 1e-08) - (log(c) + guarded(d))) * 0.001 + 1))
              + (e + ((log(f + 1e-08) - g) * 0.001 + 1))
    guarded(d) replaces a non-finite upstream logsumexp max with 0.
    Machine-generated: do not hand-edit.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x5 = xindex // 4
    x0 = xindex % 4
    x6 = xindex // 16
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr4 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp25 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp1 = -tmp0
    tmp3 = 1e-08
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.log(tmp4)
    tmp7 = tl_math.log(tmp6)
    # Re-apply the upstream logsumexp's non-finite-max guard.
    tmp9 = tl_math.abs(tmp8)
    tmp10 = float('inf')
    tmp11 = tmp9 == tmp10
    tmp12 = 0.0
    tmp13 = tl.where(tmp11, tmp12, tmp8)
    tmp14 = tmp7 + tmp13
    tmp15 = tmp5 - tmp14
    tmp16 = 0.001
    tmp17 = tmp15 * tmp16
    tmp18 = 1.0
    tmp19 = tmp17 + tmp18
    tmp20 = tmp1 + tmp19
    tmp23 = tmp22 + tmp3
    tmp24 = tl_math.log(tmp23)
    tmp26 = tmp24 - tmp25
    tmp27 = tmp26 * tmp16
    tmp28 = tmp27 + tmp18
    tmp29 = tmp21 + tmp28
    tmp30 = tmp20 + tmp29
    tl.store(out_ptr0 + x7, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_log_logsumexp_sub_6(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: log-ratio against a scaled logsumexp.

    Per element: out = log(a + 1e-08) - logsumexp_k(b_k * 1000.0), the
    logsumexp being computed inline over four contiguous elements of in_ptr1
    with the non-finite-max guard.  Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp5 = 1000.0
    tmp6 = tmp4 * tmp5
    tmp8 = tmp7 * tmp5
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp10 * tmp5
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp14 = tmp13 * tmp5
    tmp15 = triton_helpers.maximum(tmp12, tmp14)
    # logsumexp guard: replace a non-finite max by 0 before shifting.
    tmp16 = tl_math.abs(tmp15)
    tmp17 = float('inf')
    tmp18 = tmp16 == tmp17
    tmp19 = 0.0
    tmp20 = tl.where(tmp18, tmp19, tmp15)
    tmp21 = tmp6 - tmp20
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp8 - tmp20
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp22 + tmp24
    tmp26 = tmp11 - tmp20
    tmp27 = tl_math.exp(tmp26)
    tmp28 = tmp25 + tmp27
    tmp29 = tmp14 - tmp20
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp28 + tmp30
    tmp32 = tl_math.log(tmp31)
    tmp33 = tmp32 + tmp20
    tmp34 = tmp3 - tmp33
    tl.store(out_ptr0 + x0, tmp34, xmask)
@triton.jit
def triton_poi_fused_add_neg_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: accumulate log-ratio terms.

    Per element:
        out = (-a + (b * 0.001
                     + ((log(c + 1e-08) - (log(d) + guarded(e))) * 0.001 + 1)))
              + (f + ((log(g + 1e-08) - h) * 0.001 + 1))
    guarded(e) replaces a non-finite upstream logsumexp max with 0.
    Machine-generated: do not hand-edit.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x5 = xindex // 4
    x0 = xindex % 4
    x6 = xindex // 16
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp25 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp28 = tl.load(in_ptr7 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp1 = -tmp0
    tmp3 = 0.001
    tmp4 = tmp2 * tmp3
    tmp6 = 1e-08
    tmp7 = tmp5 + tmp6
    tmp8 = tl_math.log(tmp7)
    tmp10 = tl_math.log(tmp9)
    # Re-apply the upstream logsumexp's non-finite-max guard.
    tmp12 = tl_math.abs(tmp11)
    tmp13 = float('inf')
    tmp14 = tmp12 == tmp13
    tmp15 = 0.0
    tmp16 = tl.where(tmp14, tmp15, tmp11)
    tmp17 = tmp10 + tmp16
    tmp18 = tmp8 - tmp17
    tmp19 = tmp18 * tmp3
    tmp20 = 1.0
    tmp21 = tmp19 + tmp20
    tmp22 = tmp4 + tmp21
    tmp23 = tmp1 + tmp22
    tmp26 = tmp25 + tmp6
    tmp27 = tl_math.log(tmp26)
    tmp29 = tmp27 - tmp28
    tmp30 = tmp29 * tmp3
    tmp31 = tmp30 + tmp20
    tmp32 = tmp24 + tmp31
    tmp33 = tmp23 + tmp32
    tl.store(out_ptr0 + x7, tmp33, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_sub_8(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: log-ratio against a scaled logsumexp.

    Same computation as kernel _6 -- out = log(a + 1e-08) -
    logsumexp_k(b_k * 1000.0) with the non-finite-max guard -- but reducing
    over a different (strided, x0 + 16 * x1) axis of in_ptr1.
    Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
    tmp10 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
    tmp13 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp5 = 1000.0
    tmp6 = tmp4 * tmp5
    tmp8 = tmp7 * tmp5
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp10 * tmp5
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp14 = tmp13 * tmp5
    tmp15 = triton_helpers.maximum(tmp12, tmp14)
    # logsumexp guard: replace a non-finite max by 0 before shifting.
    tmp16 = tl_math.abs(tmp15)
    tmp17 = float('inf')
    tmp18 = tmp16 == tmp17
    tmp19 = 0.0
    tmp20 = tl.where(tmp18, tmp19, tmp15)
    tmp21 = tmp6 - tmp20
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp8 - tmp20
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp22 + tmp24
    tmp26 = tmp11 - tmp20
    tmp27 = tl_math.exp(tmp26)
    tmp28 = tmp25 + tmp27
    tmp29 = tmp14 - tmp20
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp28 + tmp30
    tmp32 = tl_math.log(tmp31)
    tmp33 = tmp32 + tmp20
    tmp34 = tmp3 - tmp33
    tl.store(out_ptr0 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused_add_neg_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """TorchInductor-generated kernel: accumulate log-ratio terms.

    Like kernel _7 with one extra accumulated input:
        out = (-a + (b * 0.001
                     + ((log(c + 1e-08) - (log(d) + guarded(e))) * 0.001 + 1)))
              + (f * 0.001 + (g + ((log(h + 1e-08) - i) * 0.001 + 1)))
    guarded(e) replaces a non-finite upstream logsumexp max with 0.
    Machine-generated: do not hand-edit.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x5 = xindex // 4
    x0 = xindex % 4
    x6 = xindex // 16
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp27 = tl.load(in_ptr7 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp30 = tl.load(in_ptr8 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp1 = -tmp0
    tmp3 = 0.001
    tmp4 = tmp2 * tmp3
    tmp6 = 1e-08
    tmp7 = tmp5 + tmp6
    tmp8 = tl_math.log(tmp7)
    tmp10 = tl_math.log(tmp9)
    # Re-apply the upstream logsumexp's non-finite-max guard.
    tmp12 = tl_math.abs(tmp11)
    tmp13 = float('inf')
    tmp14 = tmp12 == tmp13
    tmp15 = 0.0
    tmp16 = tl.where(tmp14, tmp15, tmp11)
    tmp17 = tmp10 + tmp16
    tmp18 = tmp8 - tmp17
    tmp19 = tmp18 * tmp3
    tmp20 = 1.0
    tmp21 = tmp19 + tmp20
    tmp22 = tmp4 + tmp21
    tmp23 = tmp1 + tmp22
    tmp25 = tmp24 * tmp3
    tmp28 = tmp27 + tmp6
    tmp29 = tl_math.log(tmp28)
    tmp31 = tmp29 - tmp30
    tmp32 = tmp31 * tmp3
    tmp33 = tmp32 + tmp20
    tmp34 = tmp26 + tmp33
    tmp35 = tmp25 + tmp34
    tmp36 = tmp23 + tmp35
    tl.store(out_ptr0 + x7, tmp36, xmask)
@triton.jit
def triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated in-place kernel: combine two log-ratio updates.

    Writes to in_out_ptr0:
        (log(a + 1e-08) - logsumexp_k(b_k * 1000.0)) * 0.001
        + (c * 0.001
           + ((log(a + 1e-08) - (log(d) + guarded(e))) * 0.001 + 1))
    The logsumexp is computed inline with the non-finite-max guard;
    guarded(e) applies the same guard to a precomputed max.
    Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp37 = tl.load(in_ptr2 + x0, xmask)
    tmp39 = tl.load(in_ptr3 + x0, xmask)
    tmp41 = tl.load(in_ptr4 + x0, xmask)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp5 = 1000.0
    tmp6 = tmp4 * tmp5
    tmp8 = tmp7 * tmp5
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp10 * tmp5
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp14 = tmp13 * tmp5
    tmp15 = triton_helpers.maximum(tmp12, tmp14)
    # logsumexp guard: replace a non-finite max by 0 before shifting.
    tmp16 = tl_math.abs(tmp15)
    tmp17 = float('inf')
    tmp18 = tmp16 == tmp17
    tmp19 = 0.0
    tmp20 = tl.where(tmp18, tmp19, tmp15)
    tmp21 = tmp6 - tmp20
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp8 - tmp20
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp22 + tmp24
    tmp26 = tmp11 - tmp20
    tmp27 = tl_math.exp(tmp26)
    tmp28 = tmp25 + tmp27
    tmp29 = tmp14 - tmp20
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp28 + tmp30
    tmp32 = tl_math.log(tmp31)
    tmp33 = tmp32 + tmp20
    tmp34 = tmp3 - tmp33
    tmp35 = 0.001
    tmp36 = tmp34 * tmp35
    tmp38 = tmp37 * tmp35
    tmp40 = tl_math.log(tmp39)
    # Same guard for the separately supplied max in in_ptr4.
    tmp42 = tl_math.abs(tmp41)
    tmp43 = tmp42 == tmp17
    tmp44 = tl.where(tmp43, tmp19, tmp41)
    tmp45 = tmp40 + tmp44
    tmp46 = tmp3 - tmp45
    tmp47 = tmp46 * tmp35
    tmp48 = 1.0
    tmp49 = tmp47 + tmp48
    tmp50 = tmp38 + tmp49
    tmp51 = tmp36 + tmp50
    tl.store(in_out_ptr0 + x0, tmp51, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: build the scaled argument of the next
    logsumexp.

    Per element:
        out = ((-a + b)
               + (c * 0.001
                  + (d + ((log(e + 1e-08) - f) * 0.001 + 1)))) * 1000.0
    Machine-generated: do not hand-edit.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x5 = xindex // 4
    x0 = xindex % 4
    x6 = xindex // 16
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + (x0 + 4 * x6), xmask, eviction_policy='evict_last'
        )
    tmp7 = tl.load(in_ptr3 + (x0 + 4 * x6), xmask, eviction_policy='evict_last'
        )
    tmp8 = tl.load(in_ptr4 + (x0 + 4 * x6), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp1 = -tmp0
    tmp3 = tmp1 + tmp2
    tmp5 = 0.001
    tmp6 = tmp4 * tmp5
    # 1e-08 keeps log() away from log(0).
    tmp9 = 1e-08
    tmp10 = tmp8 + tmp9
    tmp11 = tl_math.log(tmp10)
    tmp13 = tmp11 - tmp12
    tmp14 = tmp13 * tmp5
    tmp15 = 1.0
    tmp16 = tmp14 + tmp15
    tmp17 = tmp7 + tmp16
    tmp18 = tmp6 + tmp17
    tmp19 = tmp3 + tmp18
    tmp20 = 1000.0
    tmp21 = tmp19 * tmp20
    tl.store(out_ptr0 + x7, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated in-place kernel: combine log-ratio updates.

    Reads the previous value v from in_out_ptr0 and writes back:
        (log(a + 1e-08) - logsumexp_k(b_k)) * 0.001
        + (c * 0.001 + (d + ((log(a + 1e-08) - v) * 0.001 + 1)))
    The inline logsumexp over four strided elements of in_ptr1 uses the
    non-finite-max guard.  Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
    tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
    tmp9 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
    tmp32 = tl.load(in_ptr2 + x2, xmask)
    tmp34 = tl.load(in_ptr3 + x2, xmask)
    tmp35 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp10 = triton_helpers.maximum(tmp8, tmp9)
    # logsumexp guard: replace a non-finite max by 0 before shifting.
    tmp11 = tl_math.abs(tmp10)
    tmp12 = float('inf')
    tmp13 = tmp11 == tmp12
    tmp14 = 0.0
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = tmp4 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp5 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp9 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp27 + tmp15
    tmp29 = tmp3 - tmp28
    tmp30 = 0.001
    tmp31 = tmp29 * tmp30
    tmp33 = tmp32 * tmp30
    tmp36 = tmp3 - tmp35
    tmp37 = tmp36 * tmp30
    tmp38 = 1.0
    tmp39 = tmp37 + tmp38
    tmp40 = tmp34 + tmp39
    tmp41 = tmp33 + tmp40
    tmp42 = tmp31 + tmp41
    tl.store(in_out_ptr0 + x2, tmp42, xmask)
@triton.jit
def triton_poi_fused_add_div_logsumexp_neg_13(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: both halves of a stabilized logsumexp.

    For k = 0..3 it forms t_k = ((-a_k + b) + c_k) * 1000.0, then writes the
    running max of t_k (non-finite values guarded to 0) to out_ptr0 and
    sum_k exp(t_k - max) to out_ptr1 for a later log().
    Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex % 64
    x4 = xindex
    x5 = xindex // 4
    tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x4, xmask)
    tmp4 = tl.load(in_ptr2 + 4 * x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (1 + 4 * x5), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    tmp18 = tl.load(in_ptr2 + (2 + 4 * x5), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    tmp25 = tl.load(in_ptr2 + (3 + 4 * x5), xmask, eviction_policy='evict_last'
        )
    tmp1 = -tmp0
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp6 = 1000.0
    tmp7 = tmp5 * tmp6
    tmp9 = -tmp8
    tmp10 = tmp9 + tmp2
    tmp12 = tmp10 + tmp11
    tmp13 = tmp12 * tmp6
    tmp14 = triton_helpers.maximum(tmp7, tmp13)
    tmp16 = -tmp15
    tmp17 = tmp16 + tmp2
    tmp19 = tmp17 + tmp18
    tmp20 = tmp19 * tmp6
    tmp21 = triton_helpers.maximum(tmp14, tmp20)
    tmp23 = -tmp22
    tmp24 = tmp23 + tmp2
    tmp26 = tmp24 + tmp25
    tmp27 = tmp26 * tmp6
    tmp28 = triton_helpers.maximum(tmp21, tmp27)
    # logsumexp guard: a non-finite max is replaced by 0 before shifting.
    tmp29 = tl_math.abs(tmp28)
    tmp30 = float('inf')
    tmp31 = tmp29 == tmp30
    tmp32 = 0.0
    tmp33 = tl.where(tmp31, tmp32, tmp28)
    tmp34 = tmp7 - tmp33
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp13 - tmp33
    tmp37 = tl_math.exp(tmp36)
    tmp38 = tmp35 + tmp37
    tmp39 = tmp20 - tmp33
    tmp40 = tl_math.exp(tmp39)
    tmp41 = tmp38 + tmp40
    tmp42 = tmp27 - tmp33
    tmp43 = tl_math.exp(tmp42)
    tmp44 = tmp41 + tmp43
    tl.store(out_ptr0 + x4, tmp28, xmask)
    tl.store(out_ptr1 + x4, tmp44, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: build the scaled argument of the next
    logsumexp.

    Per element:
        out = (-a
               + ((log(b + 1e-08) - (log(c) + guarded(d))) * 0.001 + e)
               + f) * 1000.0
    guarded(d) replaces a non-finite upstream logsumexp max with 0.
    Machine-generated: do not hand-edit.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x5 = xindex // 4
    x0 = xindex % 4
    x6 = xindex // 16
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp1 = -tmp0
    tmp3 = 1e-08
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.log(tmp4)
    tmp7 = tl_math.log(tmp6)
    # Re-apply the upstream logsumexp's non-finite-max guard.
    tmp9 = tl_math.abs(tmp8)
    tmp10 = float('inf')
    tmp11 = tmp9 == tmp10
    tmp12 = 0.0
    tmp13 = tl.where(tmp11, tmp12, tmp8)
    tmp14 = tmp7 + tmp13
    tmp15 = tmp5 - tmp14
    tmp16 = 0.001
    tmp17 = tmp15 * tmp16
    tmp19 = tmp17 + tmp18
    tmp20 = tmp1 + tmp19
    tmp22 = tmp20 + tmp21
    tmp23 = 1000.0
    tmp24 = tmp22 * tmp23
    tl.store(out_ptr0 + x7, tmp24, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated kernel: build the scaled argument of the next
    logsumexp.

    Like kernel _14 but the last addend is a sum of two inputs:
        out = (-a
               + ((log(b + 1e-08) - (log(c) + guarded(d))) * 0.001 + e)
               + (f + g)) * 1000.0
    guarded(d) replaces a non-finite upstream logsumexp max with 0.
    Machine-generated: do not hand-edit.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x5 = xindex // 4
    x0 = xindex % 4
    x6 = xindex // 16
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
        'evict_last')
    tmp1 = -tmp0
    tmp3 = 1e-08
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.log(tmp4)
    tmp7 = tl_math.log(tmp6)
    # Re-apply the upstream logsumexp's non-finite-max guard.
    tmp9 = tl_math.abs(tmp8)
    tmp10 = float('inf')
    tmp11 = tmp9 == tmp10
    tmp12 = 0.0
    tmp13 = tl.where(tmp11, tmp12, tmp8)
    tmp14 = tmp7 + tmp13
    tmp15 = tmp5 - tmp14
    tmp16 = 0.001
    tmp17 = tmp15 * tmp16
    tmp19 = tmp17 + tmp18
    tmp20 = tmp1 + tmp19
    tmp23 = tmp21 + tmp22
    tmp24 = tmp20 + tmp23
    tmp25 = 1000.0
    tmp26 = tmp24 * tmp25
    tl.store(out_ptr0 + x7, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_16(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated in-place kernel: combine log-ratio updates.

    Reads the previous value v from in_out_ptr0 and writes back:
        (log(a + 1e-08) - logsumexp_k(b_k)) * 0.001
        + ((log(a + 1e-08) - (log(c) + guarded(d))) * 0.001 + v)
    The inline logsumexp over four contiguous elements of in_ptr1 uses the
    non-finite-max guard; guarded(d) applies the same guard to a precomputed
    max.  Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr2 + x0, xmask)
    tmp34 = tl.load(in_ptr3 + x0, xmask)
    tmp41 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp10 = triton_helpers.maximum(tmp8, tmp9)
    # logsumexp guard: replace a non-finite max by 0 before shifting.
    tmp11 = tl_math.abs(tmp10)
    tmp12 = float('inf')
    tmp13 = tmp11 == tmp12
    tmp14 = 0.0
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = tmp4 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp5 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp9 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp27 + tmp15
    tmp29 = tmp3 - tmp28
    tmp30 = 0.001
    tmp31 = tmp29 * tmp30
    tmp33 = tl_math.log(tmp32)
    # Same guard for the separately supplied max in in_ptr3.
    tmp35 = tl_math.abs(tmp34)
    tmp36 = tmp35 == tmp12
    tmp37 = tl.where(tmp36, tmp14, tmp34)
    tmp38 = tmp33 + tmp37
    tmp39 = tmp3 - tmp38
    tmp40 = tmp39 * tmp30
    tmp42 = tmp40 + tmp41
    tmp43 = tmp31 + tmp42
    tl.store(in_out_ptr0 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_17(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated in-place kernel: log-ratio against an inline
    logsumexp of freshly built terms.

    For k = 0..3 it forms t_k = ((-a_k + b_k) + (c + d)) * 1000.0, computes
    the stabilized logsumexp of the t_k (non-finite max guarded to 0), then
    writes to in_out_ptr0:
        (log(e + 1e-08) - logsumexp) * 0.001 + (c + d)
    Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x4 = xindex // 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x3, xmask)
    tmp5 = tl.load(in_ptr3 + x3, xmask)
    tmp10 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp17 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
        )
    tmp47 = tl.load(in_ptr4 + x3, xmask)
    tmp1 = -tmp0
    tmp3 = tmp1 + tmp2
    tmp6 = tmp4 + tmp5
    tmp7 = tmp3 + tmp6
    tmp8 = 1000.0
    tmp9 = tmp7 * tmp8
    tmp11 = -tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tmp13 + tmp6
    tmp15 = tmp14 * tmp8
    tmp16 = triton_helpers.maximum(tmp9, tmp15)
    tmp18 = -tmp17
    tmp20 = tmp18 + tmp19
    tmp21 = tmp20 + tmp6
    tmp22 = tmp21 * tmp8
    tmp23 = triton_helpers.maximum(tmp16, tmp22)
    tmp25 = -tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = tmp27 + tmp6
    tmp29 = tmp28 * tmp8
    tmp30 = triton_helpers.maximum(tmp23, tmp29)
    # logsumexp guard: replace a non-finite max by 0 before shifting.
    tmp31 = tl_math.abs(tmp30)
    tmp32 = float('inf')
    tmp33 = tmp31 == tmp32
    tmp34 = 0.0
    tmp35 = tl.where(tmp33, tmp34, tmp30)
    tmp36 = tmp9 - tmp35
    tmp37 = tl_math.exp(tmp36)
    tmp38 = tmp15 - tmp35
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp37 + tmp39
    tmp41 = tmp22 - tmp35
    tmp42 = tl_math.exp(tmp41)
    tmp43 = tmp40 + tmp42
    tmp44 = tmp29 - tmp35
    tmp45 = tl_math.exp(tmp44)
    tmp46 = tmp43 + tmp45
    tmp48 = 1e-08
    tmp49 = tmp47 + tmp48
    tmp50 = tl_math.log(tmp49)
    tmp51 = tl_math.log(tmp46)
    tmp52 = tmp51 + tmp35
    tmp53 = tmp50 - tmp52
    tmp54 = 0.001
    tmp55 = tmp53 * tmp54
    tmp56 = tmp55 + tmp6
    tl.store(in_out_ptr0 + x3, tmp56, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_18(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
    """TorchInductor-generated in-place kernel: combine log-ratio updates.

    Writes to in_out_ptr0:
        (log(a + 1e-08) - logsumexp_k(b_k)) * 0.001
        + ((log(a + 1e-08) - (log(c) + guarded(d))) * 0.001 + e)
    The inline logsumexp over four contiguous elements of in_ptr1 uses the
    non-finite-max guard; guarded(d) applies the same guard to a precomputed
    max.  Machine-generated: do not hand-edit.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr2 + x0, xmask)
    tmp34 = tl.load(in_ptr3 + x0, xmask)
    tmp41 = tl.load(in_ptr4 + x0, xmask)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp10 = triton_helpers.maximum(tmp8, tmp9)
    # logsumexp guard: replace a non-finite max by 0 before shifting.
    tmp11 = tl_math.abs(tmp10)
    tmp12 = float('inf')
    tmp13 = tmp11 == tmp12
    tmp14 = 0.0
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = tmp4 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp5 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp9 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp27 + tmp15
    tmp29 = tmp3 - tmp28
    tmp30 = 0.001
    tmp31 = tmp29 * tmp30
    tmp33 = tl_math.log(tmp32)
    # Same guard for the separately supplied max in in_ptr3.
    tmp35 = tl_math.abs(tmp34)
    tmp36 = tmp35 == tmp12
    tmp37 = tl.where(tmp36, tmp14, tmp34)
    tmp38 = tmp33 + tmp37
    tmp39 = tmp3 - tmp38
    tmp40 = tmp39 * tmp30
    tmp42 = tmp40 + tmp41
    tmp43 = tmp31 + tmp42
    tl.store(in_out_ptr0 + x0, tmp43, xmask)
@triton.jit
def triton_per_fused_add_div_exp_mul_neg_sum_19(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    """TorchInductor-generated persistent-reduction kernel.

    Per (x, r) element it forms the same scaled term as the preceding
    pointwise kernels:
        t = (-a + ((log(b + 1e-08) - (log(c) + guarded(d))) * 0.001 + e)
               + f) * 1000.0
    stores exp(t) to out_ptr0, and reduces sum_r(exp(t) * a) over the 16
    r-elements of each row into out_ptr1.  guarded(d) replaces a non-finite
    upstream logsumexp max with 0.  Machine-generated: do not hand-edit.
    """
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r4 = rindex
    x0 = xindex % 16
    r3 = rindex // 4
    x5 = xindex
    r2 = rindex % 4
    tmp0 = tl.load(in_ptr0 + (r4 + 16 * x0), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp2 = tl.load(in_ptr1 + (r3 + 4 * x5), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tl.load(in_ptr2 + (r3 + 4 * x5), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp8 = tl.load(in_ptr3 + (r3 + 4 * x5), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp18 = tl.load(in_ptr4 + (r3 + 4 * x5), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp21 = tl.load(in_ptr5 + (r2 + 4 * x5), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp1 = -tmp0
    tmp3 = 1e-08
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.log(tmp4)
    tmp7 = tl_math.log(tmp6)
    # Re-apply the upstream logsumexp's non-finite-max guard.
    tmp9 = tl_math.abs(tmp8)
    tmp10 = float('inf')
    tmp11 = tmp9 == tmp10
    tmp12 = 0.0
    tmp13 = tl.where(tmp11, tmp12, tmp8)
    tmp14 = tmp7 + tmp13
    tmp15 = tmp5 - tmp14
    tmp16 = 0.001
    tmp17 = tmp15 * tmp16
    tmp19 = tmp17 + tmp18
    tmp20 = tmp1 + tmp19
    tmp22 = tmp20 + tmp21
    tmp23 = 1000.0
    tmp24 = tmp22 * tmp23
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp25 * tmp0
    # Row reduction: sum exp(t) * a over the RBLOCK axis.
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.where(xmask, tmp27, 0)
    tmp30 = tl.sum(tmp29, 1)[:, None]
    tl.store(out_ptr0 + (r4 + 16 * x5), tmp25, xmask)
    tl.store(out_ptr1 + x5, tmp30, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 1, 4),
torch.float32)
get_raw_stream(0)
triton_poi_fused_logsumexp_0[grid(1024)](arg2_1, buf0, 1024, XBLOCK
=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_logsumexp_1[grid(256)](buf0, arg2_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_logsumexp_neg_2[grid(256)](arg2_1, arg1_1,
buf1, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf0
triton_poi_fused_add_div_neg_3[grid(1024)](arg2_1, arg0_1, buf3,
buf2, arg1_1, buf1, buf4, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_add_neg_5[grid(1024)](arg2_1, arg0_1, buf3, buf2,
buf5, arg1_1, buf1, buf6, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_log_logsumexp_sub_6[grid(256)](arg0_1,
buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_add_neg_7[grid(1024)](arg2_1, buf7, arg0_1, buf3,
buf2, buf5, arg1_1, buf1, buf8, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_log_logsumexp_sub_8[grid(256)](arg1_1, buf8,
buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = buf8
del buf8
triton_poi_fused_add_neg_9[grid(1024)](arg2_1, buf7, arg0_1, buf3,
buf2, buf9, buf5, arg1_1, buf1, buf10, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = buf11
del buf11
triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10[grid(256)](
buf12, arg0_1, buf10, buf7, buf3, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
buf13 = buf10
del buf10
triton_poi_fused_add_div_neg_11[grid(1024)](arg2_1, buf12, buf9,
buf5, arg1_1, buf1, buf13, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf15 = buf1
del buf1
triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12[grid(256)](
buf15, arg1_1, buf13, buf9, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf16 = reinterpret_tensor(buf9, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf9
buf17 = buf5
del buf5
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf12,
buf15, buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf18 = buf13
del buf13
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf17,
buf16, buf12, buf15, buf18, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf19 = buf7
del buf7
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf18, buf19, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf20 = buf18
del buf18
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf17,
buf16, buf12, buf19, buf15, buf20, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf22 = buf12
del buf12
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf22,
arg0_1, buf20, buf17, buf16, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf24 = buf17
del buf17
buf25 = buf24
del buf24
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf25,
arg2_1, buf22, buf19, buf15, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf26 = reinterpret_tensor(buf19, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf19
buf27 = buf15
del buf15
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf22,
buf25, buf26, buf27, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf28 = buf20
del buf20
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf27,
buf26, buf22, buf25, buf28, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf29 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf16
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf28, buf29, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf30 = buf28
del buf28
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf27,
buf26, buf22, buf29, buf25, buf30, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf32 = buf22
del buf22
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf32,
arg0_1, buf30, buf27, buf26, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf34 = buf27
del buf27
buf35 = buf34
del buf34
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf35,
arg2_1, buf32, buf29, buf25, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf36 = reinterpret_tensor(buf29, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf29
buf37 = buf25
del buf25
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf32,
buf35, buf36, buf37, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf38 = buf30
del buf30
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf37,
buf36, buf32, buf35, buf38, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf39 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf26
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf38, buf39, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf40 = buf38
del buf38
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf37,
buf36, buf32, buf39, buf35, buf40, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf42 = buf32
del buf32
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf42,
arg0_1, buf40, buf37, buf36, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf44 = buf37
del buf37
buf45 = buf44
del buf44
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf45,
arg2_1, buf42, buf39, buf35, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf46 = reinterpret_tensor(buf39, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf39
buf47 = buf35
del buf35
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf42,
buf45, buf46, buf47, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf48 = buf40
del buf40
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf47,
buf46, buf42, buf45, buf48, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf49 = reinterpret_tensor(buf36, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf36
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf48, buf49, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf50 = buf48
del buf48
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf47,
buf46, buf42, buf49, buf45, buf50, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf52 = buf42
del buf42
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf52,
arg0_1, buf50, buf47, buf46, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf54 = buf47
del buf47
buf55 = buf54
del buf54
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf55,
arg2_1, buf52, buf49, buf45, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf56 = reinterpret_tensor(buf49, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf49
buf57 = buf45
del buf45
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf52,
buf55, buf56, buf57, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf58 = buf50
del buf50
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf57,
buf56, buf52, buf55, buf58, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf59 = reinterpret_tensor(buf46, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf46
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf58, buf59, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf60 = buf58
del buf58
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf57,
buf56, buf52, buf59, buf55, buf60, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf62 = buf52
del buf52
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf62,
arg0_1, buf60, buf57, buf56, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf64 = buf57
del buf57
buf65 = buf64
del buf64
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf65,
arg2_1, buf62, buf59, buf55, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf66 = reinterpret_tensor(buf59, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf59
buf67 = buf55
del buf55
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf62,
buf65, buf66, buf67, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf68 = buf60
del buf60
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf67,
buf66, buf62, buf65, buf68, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf69 = reinterpret_tensor(buf56, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf56
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf68, buf69, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf70 = buf68
del buf68
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf67,
buf66, buf62, buf69, buf65, buf70, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf72 = buf62
del buf62
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf72,
arg0_1, buf70, buf67, buf66, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf74 = buf67
del buf67
buf75 = buf74
del buf74
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf75,
arg2_1, buf72, buf69, buf65, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf76 = reinterpret_tensor(buf69, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf69
buf77 = buf65
del buf65
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf72,
buf75, buf76, buf77, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf78 = buf70
del buf70
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf77,
buf76, buf72, buf75, buf78, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf79 = reinterpret_tensor(buf66, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf66
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf78, buf79, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf80 = buf78
del buf78
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf77,
buf76, buf72, buf79, buf75, buf80, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf82 = buf72
del buf72
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf82,
arg0_1, buf80, buf77, buf76, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf84 = buf77
del buf77
buf85 = buf84
del buf84
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf85,
arg2_1, buf82, buf79, buf75, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf86 = reinterpret_tensor(buf79, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf79
buf87 = buf75
del buf75
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf82,
buf85, buf86, buf87, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf88 = buf80
del buf80
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf87,
buf86, buf82, buf85, buf88, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf89 = reinterpret_tensor(buf76, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf76
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf88, buf89, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf90 = buf88
del buf88
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf87,
buf86, buf82, buf89, buf85, buf90, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf92 = buf82
del buf82
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf92,
arg0_1, buf90, buf87, buf86, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf94 = buf87
del buf87
buf95 = buf94
del buf94
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf95,
arg2_1, buf92, buf89, buf85, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf96 = reinterpret_tensor(buf89, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf89
buf97 = buf85
del buf85
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf92,
buf95, buf96, buf97, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf98 = buf90
del buf90
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf97,
buf96, buf92, buf95, buf98, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf99 = reinterpret_tensor(buf86, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf86
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf98, buf99, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf100 = buf98
del buf98
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf97,
buf96, buf92, buf99, buf95, buf100, 1024, XBLOCK=256, num_warps
=4, num_stages=1)
buf101 = buf3
del buf3
buf102 = buf101
del buf101
triton_poi_fused_add_log_logsumexp_mul_sub_18[grid(256)](buf102,
arg0_1, buf100, buf97, buf96, buf92, 256, XBLOCK=128, num_warps
=4, num_stages=1)
del buf92
buf104 = buf97
del buf97
buf105 = buf104
del buf104
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf105,
arg2_1, buf102, buf99, buf95, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf106 = reinterpret_tensor(buf99, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf99
buf107 = buf95
del buf95
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf102,
buf105, buf106, buf107, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf108 = buf100
del buf100
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf107,
buf106, buf102, buf105, buf108, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf109 = reinterpret_tensor(buf96, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf96
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf108, buf109, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf110 = buf108
del buf108
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf107,
buf106, buf102, buf109, buf105, buf110, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf112 = buf102
del buf102
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf112,
arg0_1, buf110, buf107, buf106, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf114 = buf107
del buf107
buf115 = buf114
del buf114
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf115,
arg2_1, buf112, buf109, buf105, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf116 = reinterpret_tensor(buf109, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf109
buf117 = buf105
del buf105
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf112,
buf115, buf116, buf117, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf118 = buf110
del buf110
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf117,
buf116, buf112, buf115, buf118, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf119 = reinterpret_tensor(buf106, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf106
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf118, buf119, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf120 = buf118
del buf118
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf117,
buf116, buf112, buf119, buf115, buf120, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf122 = buf112
del buf112
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf122,
arg0_1, buf120, buf117, buf116, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf124 = buf117
del buf117
buf125 = buf124
del buf124
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf125,
arg2_1, buf122, buf119, buf115, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf126 = reinterpret_tensor(buf119, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf119
buf127 = buf115
del buf115
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf122,
buf125, buf126, buf127, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf128 = buf120
del buf120
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf127,
buf126, buf122, buf125, buf128, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf129 = reinterpret_tensor(buf116, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf116
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf128, buf129, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf130 = buf128
del buf128
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf127,
buf126, buf122, buf129, buf125, buf130, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf132 = buf122
del buf122
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf132,
arg0_1, buf130, buf127, buf126, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf134 = buf127
del buf127
buf135 = buf134
del buf134
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf135,
arg2_1, buf132, buf129, buf125, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf136 = reinterpret_tensor(buf129, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf129
buf137 = buf125
del buf125
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf132,
buf135, buf136, buf137, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf138 = buf130
del buf130
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf137,
buf136, buf132, buf135, buf138, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf139 = reinterpret_tensor(buf126, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf126
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf138, buf139, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf140 = buf138
del buf138
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf137,
buf136, buf132, buf139, buf135, buf140, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf142 = buf132
del buf132
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf142,
arg0_1, buf140, buf137, buf136, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf144 = buf137
del buf137
buf145 = buf144
del buf144
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf145,
arg2_1, buf142, buf139, buf135, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf146 = reinterpret_tensor(buf139, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf139
buf147 = buf135
del buf135
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf142,
buf145, buf146, buf147, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf148 = buf140
del buf140
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf147,
buf146, buf142, buf145, buf148, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf149 = reinterpret_tensor(buf136, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf136
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf148, buf149, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf150 = buf148
del buf148
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf147,
buf146, buf142, buf149, buf145, buf150, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf152 = buf142
del buf142
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf152,
arg0_1, buf150, buf147, buf146, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf154 = buf147
del buf147
buf155 = buf154
del buf154
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf155,
arg2_1, buf152, buf149, buf145, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf156 = reinterpret_tensor(buf149, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf149
buf157 = buf145
del buf145
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf152,
buf155, buf156, buf157, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf158 = buf150
del buf150
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf157,
buf156, buf152, buf155, buf158, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf159 = reinterpret_tensor(buf146, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf146
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf158, buf159, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf160 = buf158
del buf158
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf157,
buf156, buf152, buf159, buf155, buf160, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf162 = buf152
del buf152
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf162,
arg0_1, buf160, buf157, buf156, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf164 = buf157
del buf157
buf165 = buf164
del buf164
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf165,
arg2_1, buf162, buf159, buf155, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf166 = reinterpret_tensor(buf159, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf159
buf167 = buf155
del buf155
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf162,
buf165, buf166, buf167, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf168 = buf160
del buf160
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf167,
buf166, buf162, buf165, buf168, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf169 = reinterpret_tensor(buf156, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf156
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf168, buf169, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf170 = buf168
del buf168
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf167,
buf166, buf162, buf169, buf165, buf170, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf172 = buf162
del buf162
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf172,
arg0_1, buf170, buf167, buf166, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf174 = buf167
del buf167
buf175 = buf174
del buf174
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf175,
arg2_1, buf172, buf169, buf165, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf176 = reinterpret_tensor(buf169, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf169
buf177 = buf165
del buf165
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf172,
buf175, buf176, buf177, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf178 = buf170
del buf170
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf177,
buf176, buf172, buf175, buf178, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf179 = reinterpret_tensor(buf166, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf166
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf178, buf179, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf180 = buf178
del buf178
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf177,
buf176, buf172, buf179, buf175, buf180, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf182 = buf172
del buf172
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf182,
arg0_1, buf180, buf177, buf176, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf184 = buf177
del buf177
buf185 = buf184
del buf184
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf185,
arg2_1, buf182, buf179, buf175, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf186 = reinterpret_tensor(buf179, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf179
buf187 = buf175
del buf175
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf182,
buf185, buf186, buf187, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf188 = buf180
del buf180
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf187,
buf186, buf182, buf185, buf188, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf189 = reinterpret_tensor(buf176, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf176
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf188, buf189, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf190 = buf188
del buf188
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf187,
buf186, buf182, buf189, buf185, buf190, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf192 = buf182
del buf182
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf192,
arg0_1, buf190, buf187, buf186, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf194 = buf187
del buf187
buf195 = buf194
del buf194
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf195,
arg2_1, buf192, buf189, buf185, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf196 = reinterpret_tensor(buf189, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf189
buf197 = buf185
del buf185
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf192,
buf195, buf196, buf197, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf198 = buf190
del buf190
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf197,
buf196, buf192, buf195, buf198, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf199 = reinterpret_tensor(buf186, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf186
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf198, buf199, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf200 = buf198
del buf198
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf197,
buf196, buf192, buf199, buf195, buf200, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf202 = buf192
del buf192
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf202,
arg0_1, buf200, buf197, buf196, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf204 = buf197
del buf197
buf205 = buf204
del buf204
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf205,
arg2_1, buf202, buf199, buf195, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf206 = reinterpret_tensor(buf199, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf199
buf207 = buf195
del buf195
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf202,
buf205, buf206, buf207, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf208 = buf200
del buf200
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf207,
buf206, buf202, buf205, buf208, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf209 = reinterpret_tensor(buf196, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf196
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf208, buf209, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf210 = buf208
del buf208
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf207,
buf206, buf202, buf209, buf205, buf210, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf212 = buf202
del buf202
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf212,
arg0_1, buf210, buf207, buf206, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf214 = buf207
del buf207
buf215 = buf214
del buf214
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf215,
arg2_1, buf212, buf209, buf205, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf216 = reinterpret_tensor(buf209, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf209
buf217 = buf205
del buf205
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf212,
buf215, buf216, buf217, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf218 = buf210
del buf210
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf217,
buf216, buf212, buf215, buf218, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf219 = reinterpret_tensor(buf206, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf206
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf218, buf219, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf220 = buf218
del buf218
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf217,
buf216, buf212, buf219, buf215, buf220, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf222 = buf212
del buf212
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf222,
arg0_1, buf220, buf217, buf216, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf224 = buf217
del buf217
buf225 = buf224
del buf224
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf225,
arg2_1, buf222, buf219, buf215, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf226 = reinterpret_tensor(buf219, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf219
buf227 = buf215
del buf215
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf222,
buf225, buf226, buf227, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf228 = buf220
del buf220
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf227,
buf226, buf222, buf225, buf228, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf229 = reinterpret_tensor(buf216, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf216
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf228, buf229, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf230 = buf228
del buf228
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf227,
buf226, buf222, buf229, buf225, buf230, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf232 = buf222
del buf222
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf232,
arg0_1, buf230, buf227, buf226, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf234 = buf227
del buf227
buf235 = buf234
del buf234
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf235,
arg2_1, buf232, buf229, buf225, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf236 = reinterpret_tensor(buf229, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf229
buf237 = buf225
del buf225
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf232,
buf235, buf236, buf237, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf238 = buf230
del buf230
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf237,
buf236, buf232, buf235, buf238, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf239 = reinterpret_tensor(buf226, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf226
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf238, buf239, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf240 = buf238
del buf238
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf237,
buf236, buf232, buf239, buf235, buf240, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf242 = buf232
del buf232
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf242,
arg0_1, buf240, buf237, buf236, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf244 = buf237
del buf237
buf245 = buf244
del buf244
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf245,
arg2_1, buf242, buf239, buf235, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf246 = reinterpret_tensor(buf239, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf239
buf247 = buf235
del buf235
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf242,
buf245, buf246, buf247, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf248 = buf240
del buf240
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf247,
buf246, buf242, buf245, buf248, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf249 = reinterpret_tensor(buf236, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf236
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf248, buf249, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf250 = buf248
del buf248
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf247,
buf246, buf242, buf249, buf245, buf250, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf252 = buf242
del buf242
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf252,
arg0_1, buf250, buf247, buf246, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf254 = buf247
del buf247
buf255 = buf254
del buf254
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf255,
arg2_1, buf252, buf249, buf245, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf256 = reinterpret_tensor(buf249, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf249
buf257 = buf245
del buf245
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf252,
buf255, buf256, buf257, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf258 = buf250
del buf250
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf257,
buf256, buf252, buf255, buf258, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf259 = reinterpret_tensor(buf246, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf246
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf258, buf259, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf260 = buf258
del buf258
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf257,
buf256, buf252, buf259, buf255, buf260, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf262 = buf252
del buf252
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf262,
arg0_1, buf260, buf257, buf256, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf264 = buf257
del buf257
buf265 = buf264
del buf264
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf265,
arg2_1, buf262, buf259, buf255, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf266 = reinterpret_tensor(buf259, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf259
buf267 = buf255
del buf255
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf262,
buf265, buf266, buf267, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf268 = buf260
del buf260
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf267,
buf266, buf262, buf265, buf268, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf269 = reinterpret_tensor(buf256, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf256
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf268, buf269, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf270 = buf268
del buf268
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf267,
buf266, buf262, buf269, buf265, buf270, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf272 = buf262
del buf262
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf272,
arg0_1, buf270, buf267, buf266, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf274 = buf267
del buf267
buf275 = buf274
del buf274
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf275,
arg2_1, buf272, buf269, buf265, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf276 = reinterpret_tensor(buf269, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf269
buf277 = buf265
del buf265
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf272,
buf275, buf276, buf277, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf278 = buf270
del buf270
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf277,
buf276, buf272, buf275, buf278, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf279 = reinterpret_tensor(buf266, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf266
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf278, buf279, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf280 = buf278
del buf278
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf277,
buf276, buf272, buf279, buf275, buf280, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf282 = buf272
del buf272
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf282,
arg0_1, buf280, buf277, buf276, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf284 = buf277
del buf277
buf285 = buf284
del buf284
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf285,
arg2_1, buf282, buf279, buf275, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf286 = reinterpret_tensor(buf279, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf279
buf287 = buf275
del buf275
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf282,
buf285, buf286, buf287, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf288 = buf280
del buf280
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf287,
buf286, buf282, buf285, buf288, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf289 = reinterpret_tensor(buf276, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf276
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf288, buf289, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf290 = buf288
del buf288
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf287,
buf286, buf282, buf289, buf285, buf290, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf292 = buf282
del buf282
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf292,
arg0_1, buf290, buf287, buf286, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf294 = buf287
del buf287
buf295 = buf294
del buf294
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf295,
arg2_1, buf292, buf289, buf285, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf296 = reinterpret_tensor(buf289, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf289
buf297 = buf285
del buf285
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf292,
buf295, buf296, buf297, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf298 = buf290
del buf290
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf297,
buf296, buf292, buf295, buf298, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf299 = reinterpret_tensor(buf286, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf286
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf298, buf299, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf300 = buf298
del buf298
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf297,
buf296, buf292, buf299, buf295, buf300, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf302 = buf292
del buf292
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf302,
arg0_1, buf300, buf297, buf296, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf304 = buf297
del buf297
buf305 = buf304
del buf304
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf305,
arg2_1, buf302, buf299, buf295, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf306 = reinterpret_tensor(buf299, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf299
buf307 = buf295
del buf295
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf302,
buf305, buf306, buf307, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf308 = buf300
del buf300
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf307,
buf306, buf302, buf305, buf308, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf309 = reinterpret_tensor(buf296, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf296
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf308, buf309, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf310 = buf308
del buf308
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf307,
buf306, buf302, buf309, buf305, buf310, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf312 = buf302
del buf302
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf312,
arg0_1, buf310, buf307, buf306, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf314 = buf307
del buf307
buf315 = buf314
del buf314
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf315,
arg2_1, buf312, buf309, buf305, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf316 = reinterpret_tensor(buf309, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf309
buf317 = buf305
del buf305
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf312,
buf315, buf316, buf317, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf318 = buf310
del buf310
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf317,
buf316, buf312, buf315, buf318, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf319 = reinterpret_tensor(buf306, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf306
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf318, buf319, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf320 = buf318
del buf318
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf317,
buf316, buf312, buf319, buf315, buf320, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf322 = buf312
del buf312
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf322,
arg0_1, buf320, buf317, buf316, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf324 = buf317
del buf317
buf325 = buf324
del buf324
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf325,
arg2_1, buf322, buf319, buf315, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf326 = reinterpret_tensor(buf319, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf319
buf327 = buf315
del buf315
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf322,
buf325, buf326, buf327, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf328 = buf320
del buf320
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf327,
buf326, buf322, buf325, buf328, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf329 = reinterpret_tensor(buf316, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf316
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf328, buf329, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf330 = buf328
del buf328
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf327,
buf326, buf322, buf329, buf325, buf330, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf332 = buf322
del buf322
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf332,
arg0_1, buf330, buf327, buf326, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf334 = buf327
del buf327
buf335 = buf334
del buf334
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf335,
arg2_1, buf332, buf329, buf325, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf336 = reinterpret_tensor(buf329, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf329
buf337 = buf325
del buf325
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf332,
buf335, buf336, buf337, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf338 = buf330
del buf330
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf337,
buf336, buf332, buf335, buf338, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf339 = reinterpret_tensor(buf326, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf326
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf338, buf339, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf340 = buf338
del buf338
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf337,
buf336, buf332, buf339, buf335, buf340, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf342 = buf332
del buf332
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf342,
arg0_1, buf340, buf337, buf336, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf344 = buf337
del buf337
buf345 = buf344
del buf344
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf345,
arg2_1, buf342, buf339, buf335, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf346 = reinterpret_tensor(buf339, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf339
buf347 = buf335
del buf335
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf342,
buf345, buf346, buf347, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf348 = buf340
del buf340
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf347,
buf346, buf342, buf345, buf348, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf349 = reinterpret_tensor(buf336, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf336
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf348, buf349, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf350 = buf348
del buf348
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf347,
buf346, buf342, buf349, buf345, buf350, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf352 = buf342
del buf342
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf352,
arg0_1, buf350, buf347, buf346, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf354 = buf347
del buf347
buf355 = buf354
del buf354
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf355,
arg2_1, buf352, buf349, buf345, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf356 = reinterpret_tensor(buf349, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf349
buf357 = buf345
del buf345
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf352,
buf355, buf356, buf357, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf358 = buf350
del buf350
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf357,
buf356, buf352, buf355, buf358, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf359 = reinterpret_tensor(buf346, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf346
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf358, buf359, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf360 = buf358
del buf358
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf357,
buf356, buf352, buf359, buf355, buf360, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf362 = buf352
del buf352
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf362,
arg0_1, buf360, buf357, buf356, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf364 = buf357
del buf357
buf365 = buf364
del buf364
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf365,
arg2_1, buf362, buf359, buf355, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf366 = reinterpret_tensor(buf359, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf359
buf367 = buf355
del buf355
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf362,
buf365, buf366, buf367, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf368 = buf360
del buf360
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf367,
buf366, buf362, buf365, buf368, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf369 = reinterpret_tensor(buf356, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf356
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf368, buf369, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf370 = buf368
del buf368
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf367,
buf366, buf362, buf369, buf365, buf370, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf372 = buf362
del buf362
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf372,
arg0_1, buf370, buf367, buf366, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf374 = buf367
del buf367
buf375 = buf374
del buf374
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf375,
arg2_1, buf372, buf369, buf365, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf376 = reinterpret_tensor(buf369, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf369
buf377 = buf365
del buf365
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf372,
buf375, buf376, buf377, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf378 = buf370
del buf370
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf377,
buf376, buf372, buf375, buf378, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf379 = reinterpret_tensor(buf366, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf366
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf378, buf379, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf380 = buf378
del buf378
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf377,
buf376, buf372, buf379, buf375, buf380, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf382 = buf372
del buf372
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf382,
arg0_1, buf380, buf377, buf376, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf384 = buf377
del buf377
buf385 = buf384
del buf384
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf385,
arg2_1, buf382, buf379, buf375, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf386 = reinterpret_tensor(buf379, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf379
buf387 = buf375
del buf375
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf382,
buf385, buf386, buf387, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf388 = buf380
del buf380
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf387,
buf386, buf382, buf385, buf388, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf389 = reinterpret_tensor(buf376, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf376
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf388, buf389, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf390 = buf388
del buf388
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf387,
buf386, buf382, buf389, buf385, buf390, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf392 = buf382
del buf382
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf392,
arg0_1, buf390, buf387, buf386, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf394 = buf387
del buf387
buf395 = buf394
del buf394
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf395,
arg2_1, buf392, buf389, buf385, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf396 = reinterpret_tensor(buf389, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf389
buf397 = buf385
del buf385
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf392,
buf395, buf396, buf397, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf398 = buf390
del buf390
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf397,
buf396, buf392, buf395, buf398, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf399 = reinterpret_tensor(buf386, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf386
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf398, buf399, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf400 = buf398
del buf398
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf397,
buf396, buf392, buf399, buf395, buf400, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf402 = buf392
del buf392
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf402,
arg0_1, buf400, buf397, buf396, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf404 = buf397
del buf397
buf405 = buf404
del buf404
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf405,
arg2_1, buf402, buf399, buf395, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf406 = reinterpret_tensor(buf399, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf399
buf407 = buf395
del buf395
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf402,
buf405, buf406, buf407, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf408 = buf400
del buf400
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf407,
buf406, buf402, buf405, buf408, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf409 = reinterpret_tensor(buf396, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf396
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf408, buf409, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf410 = buf408
del buf408
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf407,
buf406, buf402, buf409, buf405, buf410, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf412 = buf402
del buf402
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf412,
arg0_1, buf410, buf407, buf406, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf414 = buf407
del buf407
buf415 = buf414
del buf414
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf415,
arg2_1, buf412, buf409, buf405, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf416 = reinterpret_tensor(buf409, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf409
buf417 = buf405
del buf405
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf412,
buf415, buf416, buf417, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf418 = buf410
del buf410
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf417,
buf416, buf412, buf415, buf418, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf419 = reinterpret_tensor(buf406, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf406
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf418, buf419, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf420 = buf418
del buf418
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf417,
buf416, buf412, buf419, buf415, buf420, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf422 = buf412
del buf412
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf422,
arg0_1, buf420, buf417, buf416, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf424 = buf417
del buf417
buf425 = buf424
del buf424
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf425,
arg2_1, buf422, buf419, buf415, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf426 = reinterpret_tensor(buf419, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf419
buf427 = buf415
del buf415
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf422,
buf425, buf426, buf427, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf428 = buf420
del buf420
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf427,
buf426, buf422, buf425, buf428, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf429 = reinterpret_tensor(buf416, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf416
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf428, buf429, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf430 = buf428
del buf428
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf427,
buf426, buf422, buf429, buf425, buf430, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf432 = buf422
del buf422
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf432,
arg0_1, buf430, buf427, buf426, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf434 = buf427
del buf427
buf435 = buf434
del buf434
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf435,
arg2_1, buf432, buf429, buf425, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf436 = reinterpret_tensor(buf429, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf429
buf437 = buf425
del buf425
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf432,
buf435, buf436, buf437, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf438 = buf430
del buf430
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf437,
buf436, buf432, buf435, buf438, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf439 = reinterpret_tensor(buf426, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf426
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf438, buf439, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf440 = buf438
del buf438
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf437,
buf436, buf432, buf439, buf435, buf440, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf442 = buf432
del buf432
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf442,
arg0_1, buf440, buf437, buf436, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf444 = buf437
del buf437
buf445 = buf444
del buf444
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf445,
arg2_1, buf442, buf439, buf435, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf446 = reinterpret_tensor(buf439, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf439
buf447 = buf435
del buf435
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf442,
buf445, buf446, buf447, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf448 = buf440
del buf440
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf447,
buf446, buf442, buf445, buf448, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf449 = reinterpret_tensor(buf436, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf436
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf448, buf449, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf450 = buf448
del buf448
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf447,
buf446, buf442, buf449, buf445, buf450, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf452 = buf442
del buf442
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf452,
arg0_1, buf450, buf447, buf446, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf454 = buf447
del buf447
buf455 = buf454
del buf454
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf455,
arg2_1, buf452, buf449, buf445, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf456 = reinterpret_tensor(buf449, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf449
buf457 = buf445
del buf445
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf452,
buf455, buf456, buf457, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf458 = buf450
del buf450
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf457,
buf456, buf452, buf455, buf458, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf459 = reinterpret_tensor(buf446, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf446
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf458, buf459, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf460 = buf458
del buf458
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf457,
buf456, buf452, buf459, buf455, buf460, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf462 = buf452
del buf452
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf462,
arg0_1, buf460, buf457, buf456, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf464 = buf457
del buf457
buf465 = buf464
del buf464
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf465,
arg2_1, buf462, buf459, buf455, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf466 = reinterpret_tensor(buf459, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf459
buf467 = buf455
del buf455
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf462,
buf465, buf466, buf467, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf468 = buf460
del buf460
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf467,
buf466, buf462, buf465, buf468, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf469 = reinterpret_tensor(buf456, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf456
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf468, buf469, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf470 = buf468
del buf468
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf467,
buf466, buf462, buf469, buf465, buf470, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf472 = buf462
del buf462
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf472,
arg0_1, buf470, buf467, buf466, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf474 = buf467
del buf467
buf475 = buf474
del buf474
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf475,
arg2_1, buf472, buf469, buf465, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf476 = reinterpret_tensor(buf469, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf469
buf477 = buf465
del buf465
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf472,
buf475, buf476, buf477, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf478 = buf470
del buf470
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf477,
buf476, buf472, buf475, buf478, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf479 = reinterpret_tensor(buf466, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf466
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf478, buf479, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf480 = buf478
del buf478
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf477,
buf476, buf472, buf479, buf475, buf480, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf482 = buf472
del buf472
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf482,
arg0_1, buf480, buf477, buf476, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf484 = buf477
del buf477
buf485 = buf484
del buf484
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf485,
arg2_1, buf482, buf479, buf475, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf486 = reinterpret_tensor(buf479, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf479
buf487 = buf475
del buf475
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf482,
buf485, buf486, buf487, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf488 = buf480
del buf480
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf487,
buf486, buf482, buf485, buf488, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf489 = reinterpret_tensor(buf476, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf476
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf488, buf489, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf490 = buf488
del buf488
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf487,
buf486, buf482, buf489, buf485, buf490, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf492 = buf482
del buf482
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf492,
arg0_1, buf490, buf487, buf486, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf486
buf494 = buf487
del buf487
buf495 = buf494
del buf494
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf495,
arg2_1, buf492, buf489, buf485, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
buf496 = reinterpret_tensor(buf489, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf489
buf497 = buf485
del buf485
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf492,
buf495, buf496, buf497, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf498 = buf490
del buf490
buf499 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_add_div_exp_mul_neg_sum_19[grid(64)](arg2_1,
arg0_1, buf497, buf496, buf492, buf495, buf498, buf499, 64, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
del arg2_1
del buf492
del buf495
del buf496
del buf497
return buf499, buf498
class SinkhornDistanceNew(torch.nn.Module):
    """Entropy-regularized optimal-transport distance (Sinkhorn iterations).

    Approximates the regularized OT cost between two empirical measures,
    one with :math:`P_1` locations :math:`x\\in\\mathbb{R}^{D_1}` and one
    with :math:`P_2` locations :math:`y\\in\\mathbb{R}^{D_2}`.

    Args:
        eps (float): regularization coefficient.
        max_iter (int): maximum number of Sinkhorn iterations.
        reduction (string, optional): reduction applied to the output,
            one of 'none' | 'mean' | 'sum'. Default: 'none'.

    Shape:
        - Input: :math:`(N, P_1, D_1)`, :math:`(N, P_2, D_2)`
        - Output: :math:`(N)` or :math:`()`, depending on `reduction`.
    """

    def __init__(self, eps=0.001, max_iter=100, reduction='none'):
        super().__init__()
        self.eps = eps
        self.max_iter = max_iter
        self.reduction = reduction

    def M(self, C, u, v):
        """Modified cost for logarithmic updates.

        Computes :math:`M_{ij} = (-c_{ij} + u_i + v_j) / \\epsilon`.
        """
        # Keep the original left-to-right evaluation order so float
        # rounding matches the reference implementation exactly.
        shifted = -C + u.unsqueeze(-1) + v.unsqueeze(-2)
        return shifted / self.eps

    def forward(self, input_0, input_1, input_2):
        """Run the compiled kernel pipeline on the three input tensors.

        Returns the first two outputs produced by the generated `call`.
        """
        outputs = call([input_0, input_1, input_2])
        return outputs[0], outputs[1]
|
yjh0410/actionformer_release
|
SinkhornDistance
| false
| 16,855
|
[
"MIT"
] | 61
|
7a97422111d3e29c8d2e14088c850c6975855ea7
|
https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7
|
FCN8s
|
import torch
import numpy as np
import torch.nn as nn
class FCN8s(nn.Module):
def __init__(self, n_class=3):
super(FCN8s, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.score_pool3 = nn.Conv2d(256, n_class, 1)
self.score_pool4 = nn.Conv2d(512, n_class, 1)
self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2,
bias=False)
self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8,
bias=False)
self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4, stride
=2, bias=False)
self._initialize_weights()
def _initialize_weights(self):
for mod in self.modules():
if isinstance(mod, nn.Conv2d):
mod.weight.data.zero_()
if mod.bias is not None:
mod.bias.data.zero_()
if isinstance(mod, nn.ConvTranspose2d):
m, k, h, w = mod.weight.data.shape
if m != k and k != 1:
raise RuntimeError(
'input + output channels need to be the same or |output| == 1'
)
if h != w:
raise RuntimeError('filters need to be square')
filt = torch.from_numpy(self.upsample_filt(h)).float()
mod.weight.data[range(m), range(k), :, :] = filt
def upsample_filt(self, size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center
) / factor)
    def forward(self, x):
        """FCN-8s forward pass.

        VGG16 backbone produces score maps at 1/32 resolution; skip
        connections from pool4 (1/16) and pool3 (1/8) are fused in, and the
        result is upsampled 8x and cropped back to the input size.  The fixed
        crop offsets (5, 9, 31) compensate for the enlarged feature maps —
        presumably conv1_1 uses padding=100 as in the reference FCN; confirm
        against the constructor (outside this view).
        """
        h = x
        # --- VGG16 stages 1-5 ---
        h = self.relu1_1(self.conv1_1(h))
        h = self.relu1_2(self.conv1_2(h))
        h = self.pool1(h)
        h = self.relu2_1(self.conv2_1(h))
        h = self.relu2_2(self.conv2_2(h))
        h = self.pool2(h)
        h = self.relu3_1(self.conv3_1(h))
        h = self.relu3_2(self.conv3_2(h))
        h = self.relu3_3(self.conv3_3(h))
        h = self.pool3(h)
        pool3 = h  # 1/8-resolution skip feature
        h = self.relu4_1(self.conv4_1(h))
        h = self.relu4_2(self.conv4_2(h))
        h = self.relu4_3(self.conv4_3(h))
        h = self.pool4(h)
        pool4 = h  # 1/16-resolution skip feature
        h = self.relu5_1(self.conv5_1(h))
        h = self.relu5_2(self.conv5_2(h))
        h = self.relu5_3(self.conv5_3(h))
        h = self.pool5(h)
        # --- fully-convolutional classifier head ---
        h = self.relu6(self.fc6(h))
        h = self.drop6(h)
        h = self.relu7(self.fc7(h))
        h = self.drop7(h)
        h = self.score_fr(h)
        # --- 2x upsample, fuse pool4 scores ---
        h = self.upscore2(h)
        upscore2 = h
        h = self.score_pool4(pool4)
        # Crop pool4 scores to upscore2's spatial size (fixed offset 5).
        h = h[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]]
        score_pool4c = h
        h = upscore2 + score_pool4c
        # --- 2x upsample, fuse pool3 scores ---
        h = self.upscore_pool4(h)
        upscore_pool4 = h
        h = self.score_pool3(pool3)
        # Crop pool3 scores to upscore_pool4's spatial size (fixed offset 9).
        h = h[:, :, 9:9 + upscore_pool4.size()[2], 9:9 + upscore_pool4.size
            ()[3]]
        score_pool3c = h
        h = upscore_pool4 + score_pool3c
        # --- 8x upsample and crop back to the input resolution ---
        h = self.upscore8(h)
        h = h[:, :, 31:31 + x.size()[2], 31:31 + x.size()[3]]
        return h
def get_inputs():
    """Example forward-pass inputs: one batch of four 3x64x64 random images."""
    batch = torch.rand(4, 3, 64, 64)
    return [batch]
def get_init_inputs():
    """Constructor arguments for the module: no positionals, no keywords."""
    positional_args = []
    keyword_args = {}
    return [positional_args, keyword_args]
# ---- generated Triton / torch-inductor code below ----
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Copy the (4, 3, 64, 64) NCHW input into a channels-last buffer.
    # y enumerates (batch, channel) pairs (12 = 4 * 3): y0 = channel,
    # y1 = batch; x enumerates the 4096 spatial positions.
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # x axis fully covered; no xmask
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a (64, 3, 3, 3) conv weight into channels-last layout.
    # y enumerates (out_ch, in_ch) pairs (192 = 64 * 3); x = the 9 taps.
    ynumel = 192
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 64-in-channel 3x3 conv weight into channels-last layout.
    # y0 = input channel (mod 64), y1 = output channel; x = the 9 taps.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 64-in-channel 3x3 conv weight (e.g. the (128, 64, 3, 3)
    # conv2_1 weight) into channels-last layout; x = the 9 spatial taps.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 128-in-channel 3x3 conv weight into channels-last layout.
    # y0 = input channel (mod 128), y1 = output channel; x = the 9 taps.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 128-in-channel 3x3 conv weight (e.g. the (256, 128, 3, 3)
    # conv3_1 weight) into channels-last layout; x = the 9 spatial taps.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 256-in-channel 3x3 conv weight into channels-last layout.
    # The y grid is split across program_id(1) and program_id(2) because the
    # element count exceeds a single grid dimension.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 256-in-channel 3x3 conv weight (e.g. the (512, 256, 3, 3)
    # conv4_1 weight) into channels-last layout; two-level y grid as the
    # element count exceeds a single grid dimension.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a (512, 512, 3, 3) conv weight into channels-last layout
    # (shared by all conv4_x / conv5_x weights); two-level y grid.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack the (4096, 512, 7, 7) fc6 weight into channels-last layout.
    # x = the 49 spatial taps; y0 = input channel, y1 = output channel.
    xnumel = 49
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis fully covered; no ymask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a (3, 3, 4, 4) ConvTranspose2d weight (2x upscore kernels)
    # into channels-last layout; x = the 16 spatial taps.
    ynumel = 9
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
        ='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack the (3, 3, 16, 16) upscore8 ConvTranspose2d weight into
    # channels-last layout; x = the 256 spatial taps.
    ynumel = 9
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on a 64-channel channels-last conv output
    # (17572864 = 4 * 64 * 262 * 262 elements); in_ptr0 is the 64-wide bias.
    xnumel = 17572864
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool (262 -> 131) over a 64-channel channels-last map.
    # out_ptr0 receives the max value, out_ptr1 the int8 argmax (0..3) of the
    # window position, matching max_pool2d_with_indices.
    xnumel = 4393216
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x1 = xindex // 64 % 131
    x2 = xindex // 8384
    x3 = xindex
    # The four window taps: (0,0), (0,1), (1,0), (1,1) in row-major order.
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 33536 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 33536 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (16768 + x0 + 128 * x1 + 33536 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (16832 + x0 + 128 * x1 + 33536 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which tap won (strict > keeps the earliest index on ties).
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on a 128-channel channels-last conv output
    # (8786432 = 4 * 128 * 131 * 131 elements); in_ptr0 is the 128-wide bias.
    xnumel = 8786432
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 128  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Ceil-mode 2x2 stride-2 max pool (131 -> 66) for a 128-channel
    # channels-last map.  Bound checks mask out window taps that fall past
    # the 131-row/col edge (masked loads yield -inf so they never win).
    # out_ptr0 = max value, out_ptr1 = int8 argmax (0..3).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers the output
    x2 = xindex // 8448 % 66
    x1 = xindex // 128 % 66
    x0 = xindex % 128
    x3 = xindex // 557568
    x6 = xindex
    tmp0 = 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 131, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (x0 + 256 * x1 + 33536 * x2 + 2196608 * x3),
        tmp10, other=float('-inf'))
    tmp12 = 1 + 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
        x3), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x2
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (16768 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
        x3), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (16896 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
        x3), tmp26, other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    # Track which tap won (strict > keeps the earliest index on ties).
    tmp29 = tmp17 > tmp11
    tmp30 = tl.full([1], 1, tl.int8)
    tmp31 = tl.full([1], 0, tl.int8)
    tmp32 = tl.where(tmp29, tmp30, tmp31)
    tmp33 = tmp24 > tmp18
    tmp34 = tl.full([1], 2, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp27 > tmp25
    tmp37 = tl.full([1], 3, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tl.store(out_ptr0 + x6, tmp28, None)
    tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on a 256-channel channels-last conv output;
    # the launch grid exactly covers the tensor, so no bounds mask is needed.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool (66 -> 33) over a 256-channel channels-last map.
    # out_ptr0 = max value, out_ptr1 = int8 argmax (0..3) within the window.
    xnumel = 1115136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 256
    x1 = xindex // 256 % 33
    x2 = xindex // 8448
    x3 = xindex
    # The four window taps: (0,0), (0,1), (1,0), (1,1) in row-major order.
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 33792 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 33792 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 33792 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (17152 + x0 + 512 * x1 + 33792 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which tap won (strict > keeps the earliest index on ties).
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on a 512-channel channels-last conv output;
    # the launch grid exactly covers the tensor, so no bounds mask is needed.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Ceil-mode 2x2 stride-2 max pool (33 -> 17) for a 512-channel
    # channels-last map.  Bound checks mask out taps past the 33-row/col
    # edge (masked loads yield -inf).  out_ptr0 = max, out_ptr1 = int8 argmax.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers the output
    x2 = xindex // 8704 % 17
    x1 = xindex // 512 % 17
    x0 = xindex % 512
    x3 = xindex // 147968
    x6 = xindex
    tmp0 = 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 33, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 33792 * x2 + 557568 * x3),
        tmp10, other=float('-inf'))
    tmp12 = 1 + 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
        x3), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x2
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
        x3), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (17408 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
        x3), tmp26, other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    # Track which tap won (strict > keeps the earliest index on ties).
    tmp29 = tmp17 > tmp11
    tmp30 = tl.full([1], 1, tl.int8)
    tmp31 = tl.full([1], 0, tl.int8)
    tmp32 = tl.where(tmp29, tmp30, tmp31)
    tmp33 = tmp24 > tmp18
    tmp34 = tl.full([1], 2, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp27 > tmp25
    tmp37 = tl.full([1], 3, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tl.store(out_ptr0 + x6, tmp28, None)
    tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on a 512-channel channels-last conv output
    # (the smaller 17x17 stage-5 maps); grid exactly covers, no bounds mask.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Ceil-mode 2x2 stride-2 max pool (17 -> 9) for a 512-channel
    # channels-last map.  Bound checks mask out taps past the 17-row/col
    # edge (masked loads yield -inf).  out_ptr0 = max, out_ptr1 = int8 argmax.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers the output
    x2 = xindex // 4608 % 9
    x1 = xindex // 512 % 9
    x0 = xindex % 512
    x3 = xindex // 41472
    x6 = xindex
    tmp0 = 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 17, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 17408 * x2 + 147968 * x3),
        tmp10, other=float('-inf'))
    tmp12 = 1 + 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
        x3), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x2
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (8704 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
        x3), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (9216 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
        x3), tmp26, other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    # Track which tap won (strict > keeps the earliest index on ties).
    tmp29 = tmp17 > tmp11
    tmp30 = tl.full([1], 1, tl.int8)
    tmp31 = tl.full([1], 0, tl.int8)
    tmp32 = tl.where(tmp29, tmp30, tmp31)
    tmp33 = tmp24 > tmp18
    tmp34 = tl.full([1], 2, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp27 > tmp25
    tmp37 = tl.full([1], 3, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tl.store(out_ptr0 + x6, tmp28, None)
    tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on a 4096-channel channels-last output
    # (fc6 / fc7 head); grid exactly covers, so no bounds mask.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 4096  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # In-place bias add (no activation) for the 3-channel score_fr output
    # (108 = 4 * 3 * 3 * 3 elements); in_ptr0 is the 3-wide bias.
    xnumel = 108
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 3  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_24(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    # Fused pool4 skip connection: read the cropped score_pool4 map (the
    # fixed crop offset is baked into in_ptr0's load index), add its conv
    # bias (in_ptr1), and accumulate into the upscore2 buffer in place.
    # 768 = 4 * 3 * 8 * 8 output elements.
    xnumel = 768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x2 = xindex // 24 % 8
    x3 = xindex // 192
    x5 = xindex % 24
    x0 = xindex % 3  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + (270 + x5 + 51 * x2 + 867 * x3), xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    # Fused pool3 skip connection: read the cropped score_pool3 map (the
    # fixed crop offset is baked into in_ptr0's load index), add its conv
    # bias (in_ptr1), and accumulate into the upscore_pool4 buffer in place.
    # 3888 = 4 * 3 * 18 * 18 output elements.
    xnumel = 3888
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x2 = xindex // 54 % 18
    x3 = xindex // 972
    x5 = xindex % 54
    x0 = xindex % 3  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + (918 + x5 + 99 * x2 + 3267 * x3), xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_slice_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl
    .constexpr, XBLOCK: tl.constexpr):
    # Final crop: slice a 64x64 window out of the channels-last upscore8
    # output (the fixed crop offset is baked into the load index) and write
    # it as a contiguous NCHW (4, 3, 64, 64) tensor.  y enumerates
    # (batch, channel) pairs (12 = 4 * 3); x enumerates spatial positions.
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # x axis fully covered; no xmask
    x2 = xindex % 64
    x3 = xindex // 64
    y0 = yindex % 3
    y1 = yindex // 3
    x5 = xindex
    y4 = yindex
    tmp0 = tl.load(in_ptr0 + (14229 + y0 + 3 * x2 + 456 * x3 + 69312 * y1),
        ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x5 + 4096 * y4), tmp0, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1))
assert_size_stride(primals_29, (4096,), (1,))
assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_31, (4096,), (1,))
assert_size_stride(primals_32, (3, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_33, (3,), (1,))
assert_size_stride(primals_34, (3, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_35, (3, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_36, (3,), (1,))
assert_size_stride(primals_37, (3, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_38, (3, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_39, (3,), (1,))
assert_size_stride(primals_40, (3, 3, 16, 16), (768, 256, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
triton_poi_fused_1[grid(192, 9)](primals_2, buf1, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512),
torch.float32)
triton_poi_fused_9[grid(2097152, 49)](primals_28, buf14, 2097152,
49, XBLOCK=32, YBLOCK=64, num_warps=8, num_stages=1)
del primals_28
buf15 = empty_strided_cuda((3, 3, 4, 4), (48, 1, 12, 3), torch.float32)
triton_poi_fused_10[grid(9, 16)](primals_34, buf15, 9, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_34
buf16 = empty_strided_cuda((3, 3, 4, 4), (48, 1, 12, 3), torch.float32)
triton_poi_fused_10[grid(9, 16)](primals_37, buf16, 9, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_37
buf17 = empty_strided_cuda((3, 3, 16, 16), (768, 1, 48, 3), torch.
float32)
triton_poi_fused_11[grid(9, 256)](primals_40, buf17, 9, 256, XBLOCK
=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_40
buf18 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(100, 100), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_12[grid(17572864)](buf19,
primals_3, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_12[grid(17572864)](buf21,
primals_5, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.float32)
buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_13[grid(4393216)](buf21,
buf22, buf23, 4393216, XBLOCK=512, num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_14[grid(8786432)](buf25,
primals_7, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_14[grid(8786432)](buf27,
primals_9, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.float32)
buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(2230272)](buf27,
buf28, buf29, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_16[grid(4460544)](buf31,
primals_11, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_16[grid(4460544)](buf33,
primals_13, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_16[grid(4460544)](buf35,
primals_15, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.float32)
buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_17[grid(1115136)](buf35,
buf36, buf37, 1115136, XBLOCK=512, num_warps=8, num_stages=1)
buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_18[grid(2230272)](buf39,
primals_17, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_17
buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_18[grid(2230272)](buf41,
primals_19, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_18[grid(2230272)](buf43,
primals_21, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.float32)
buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(591872)](buf43,
buf44, buf45, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_20[grid(591872)](buf47,
primals_23, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_20[grid(591872)](buf49,
primals_25, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_20[grid(591872)](buf51,
primals_27, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.float32)
buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_21[grid(165888)](buf51,
buf52, buf53, 165888, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf55 = buf54
del buf54
triton_poi_fused_convolution_relu_22[grid(147456)](buf55,
primals_29, 147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_29
buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_22[grid(147456)](buf57,
primals_31, 147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_31
buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 3, 3, 3), (27, 1, 9, 3))
buf59 = buf58
del buf58
triton_poi_fused_convolution_23[grid(108)](buf59, primals_33, 108,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_33
buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 3, 8, 8), (192, 1, 24, 3))
buf61 = extern_kernels.convolution(buf44, primals_35, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 3, 17, 17), (867, 1, 51, 3))
buf62 = buf60
del buf60
triton_poi_fused_add_24[grid(768)](buf62, buf61, primals_36, 768,
XBLOCK=128, num_warps=4, num_stages=1)
del buf61
del primals_36
buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 3, 18, 18), (972, 1, 54, 3))
buf64 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 3, 33, 33), (3267, 1, 99, 3))
buf65 = buf63
del buf63
triton_poi_fused_add_25[grid(3888)](buf65, buf64, primals_39, 3888,
XBLOCK=256, num_warps=4, num_stages=1)
del buf64
del primals_39
buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 3, 152, 152), (69312, 1, 456, 3))
buf67 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_slice_26[grid(12, 4096)](buf66, buf67, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf66
return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32,
buf15, primals_35, buf16, primals_38, buf17, buf19, buf21, buf22,
buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36,
buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51,
buf52, buf53, buf55, buf57, buf59, buf62, buf65)
class FCN8sNew(nn.Module):
    """FCN-8s (Long et al., "Fully Convolutional Networks for Semantic
    Segmentation") whose forward pass is executed by a compiled Triton
    graph via `call`, defined elsewhere in this file.

    The module only owns the parameters; `forward` gathers the weights and
    biases in the exact positional order the compiled `call` graph expects
    and returns its first output (the n_class-channel score map).
    """

    def __init__(self, n_class=3):
        super(FCN8sNew, self).__init__()
        # VGG16 feature extractor. padding=100 on conv1_1 follows the
        # original Caffe FCN so the upsampled output can be center-cropped
        # back to the input size.
        self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_1 = nn.ReLU(inplace=True)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_2 = nn.ReLU(inplace=True)
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_3 = nn.ReLU(inplace=True)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Fully-convolutional classifier head.
        self.fc6 = nn.Conv2d(512, 4096, 7)
        self.relu6 = nn.ReLU(inplace=True)
        self.drop6 = nn.Dropout2d()
        self.fc7 = nn.Conv2d(4096, 4096, 1)
        self.relu7 = nn.ReLU(inplace=True)
        self.drop7 = nn.Dropout2d()
        self.score_fr = nn.Conv2d(4096, n_class, 1)
        # Skip-connection score layers and learnable upsamplers.
        self.score_pool3 = nn.Conv2d(256, n_class, 1)
        self.score_pool4 = nn.Conv2d(512, n_class, 1)
        self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2,
            bias=False)
        self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8,
            bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4, stride
            =2, bias=False)
        self._initialize_weights()

    def _initialize_weights(self):
        """Zero-init every Conv2d; bilinear-init every ConvTranspose2d."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                mod.weight.data.zero_()
                if mod.bias is not None:
                    mod.bias.data.zero_()
            if isinstance(mod, nn.ConvTranspose2d):
                m, k, h, w = mod.weight.data.shape
                if m != k and k != 1:
                    raise RuntimeError(
                        'input + output channels need to be the same or |output| == 1'
                        )
                if h != w:
                    raise RuntimeError('filters need to be square')
                filt = torch.from_numpy(self.upsample_filt(h)).float()
                # Place the bilinear kernel on the channel diagonal only;
                # cross-channel filters stay zero.
                mod.weight.data[range(m), range(k), :, :] = filt

    def upsample_filt(self, size):
        """Return a (size, size) numpy array holding the standard bilinear
        upsampling kernel used to initialize ConvTranspose2d weights.
        """
        # Local import: numpy is not among this module's top-level imports,
        # so the original implementation raised NameError on `np` here.
        import numpy as np
        factor = (size + 1) // 2
        if size % 2 == 1:
            center = factor - 1
        else:
            center = factor - 0.5
        og = np.ogrid[:size, :size]
        return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] -
            center) / factor)

    def forward(self, input_0):
        # Collect parameters in the positional order the compiled graph
        # expects (note: primals numbering below is graph order, not
        # declaration order).
        primals_2 = self.conv1_1.weight
        primals_3 = self.conv1_1.bias
        primals_4 = self.conv1_2.weight
        primals_5 = self.conv1_2.bias
        primals_6 = self.conv2_1.weight
        primals_7 = self.conv2_1.bias
        primals_8 = self.conv2_2.weight
        primals_9 = self.conv2_2.bias
        primals_10 = self.conv3_1.weight
        primals_11 = self.conv3_1.bias
        primals_12 = self.conv3_2.weight
        primals_13 = self.conv3_2.bias
        primals_14 = self.conv3_3.weight
        primals_15 = self.conv3_3.bias
        primals_16 = self.conv4_1.weight
        primals_17 = self.conv4_1.bias
        primals_18 = self.conv4_2.weight
        primals_19 = self.conv4_2.bias
        primals_20 = self.conv4_3.weight
        primals_21 = self.conv4_3.bias
        primals_22 = self.conv5_1.weight
        primals_23 = self.conv5_1.bias
        primals_24 = self.conv5_2.weight
        primals_25 = self.conv5_2.bias
        primals_26 = self.conv5_3.weight
        primals_27 = self.conv5_3.bias
        primals_28 = self.fc6.weight
        primals_29 = self.fc6.bias
        primals_30 = self.fc7.weight
        primals_31 = self.fc7.bias
        primals_32 = self.score_fr.weight
        primals_33 = self.score_fr.bias
        primals_38 = self.score_pool3.weight
        primals_36 = self.score_pool3.bias
        primals_35 = self.score_pool4.weight
        primals_39 = self.score_pool4.bias
        primals_34 = self.upscore2.weight
        primals_40 = self.upscore8.weight
        primals_37 = self.upscore_pool4.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40])
        return output[0]
|
twni2016/OrganSegRSTN_PyTorch
|
FCN8s
| false
| 16,856
|
[
"MIT"
] | 100
|
bf571320e718c8f138e04d48645e3b4dfe75801d
|
https://github.com/twni2016/OrganSegRSTN_PyTorch/tree/bf571320e718c8f138e04d48645e3b4dfe75801d
|
LayoutNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LayoutNet(nn.Module):
    """Encoder-decoder network with two decoder branches and a regression
    head.

    Returns a 3-tuple:
      * a sigmoid layout/edge map (3 channels, input resolution),
      * a sigmoid corner map (16 channels, input resolution),
      * an 11-value regression vector from the 2048x4x4 bottleneck.
    """

    def __init__(self):
        super(LayoutNet, self).__init__()
        # Encoder: seven 3x3 conv stages, each followed by relu + 2x2 pool.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1)
        self.conv5 = nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1)
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=1, stride=1)
        self.conv7 = nn.Conv2d(1024, 2048, kernel_size=3, padding=1, stride=1)
        # Decoder branch 1 (layout map): each stage sees the previous
        # decoder output concatenated with the matching encoder feature.
        self.deconv00 = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
            stride=1)
        self.deconv0 = nn.Conv2d(1024 * 2, 512, kernel_size=3, padding=1,
            stride=1)
        self.deconv1 = nn.Conv2d(512 * 2, 256, kernel_size=3, padding=1,
            stride=1)
        self.deconv2 = nn.Conv2d(256 * 2, 128, kernel_size=3, padding=1,
            stride=1)
        self.deconv3 = nn.Conv2d(128 * 2, 64, kernel_size=3, padding=1,
            stride=1)
        self.deconv4 = nn.Conv2d(64 * 2, 32, kernel_size=3, padding=1, stride=1
            )
        self.deconv5 = nn.Conv2d(32 * 2, 3, kernel_size=3, padding=1, stride=1)
        self.deconv6_sf = nn.Sigmoid()
        # Decoder branch 2 (corner map): additionally consumes branch-1's
        # concatenated features, hence the *3 input widths.
        self.deconv00_c = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
            stride=1)
        self.deconv0_c = nn.Conv2d(1024 * 3, 512, kernel_size=3, padding=1,
            stride=1)
        self.deconv1_c = nn.Conv2d(512 * 3, 256, kernel_size=3, padding=1,
            stride=1)
        self.deconv2_c = nn.Conv2d(256 * 3, 128, kernel_size=3, padding=1,
            stride=1)
        self.deconv3_c = nn.Conv2d(128 * 3, 64, kernel_size=3, padding=1,
            stride=1)
        self.deconv4_c = nn.Conv2d(64 * 3, 32, kernel_size=3, padding=1,
            stride=1)
        self.deconv5_c = nn.Conv2d(32 * 3, 16, kernel_size=3, padding=1,
            stride=1)
        self.deconv6_sf_c = nn.Sigmoid()
        # Regression head on the flattened bottleneck.
        self.ref1 = nn.Linear(2048 * 4 * 4, 1024)
        self.ref2 = nn.Linear(1024, 256)
        self.ref3 = nn.Linear(256, 64)
        self.ref4 = nn.Linear(64, 11)
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        # --- encoder: conv -> relu -> pool, keeping every pooled feature ---
        feats = []
        h = x
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4,
                self.conv5, self.conv6, self.conv7):
            h = self.pool(self.relu(conv(h)))
            feats.append(h)
        pool1, pool2, pool3, pool4, pool5, pool6, pool7 = feats

        # --- branch 1: layout map decoder ---
        unpool00 = F.interpolate(pool7, scale_factor=2)
        h = self.relu(self.deconv00(unpool00))
        skips = (pool6, pool5, pool4, pool3, pool2, pool1)
        stages = (self.deconv0, self.deconv1, self.deconv2, self.deconv3,
            self.deconv4, self.deconv5)
        last = len(stages) - 1
        cat_feats = []  # pre-interpolation concatenations, reused by branch 2
        for i, (stage, skip) in enumerate(zip(stages, skips)):
            merged = torch.cat((h, skip), dim=1)
            cat_feats.append(merged)
            out = stage(F.interpolate(merged, scale_factor=2))
            # Final stage feeds the sigmoid directly, without a relu.
            h = out if i == last else self.relu(out)
        layout_map = self.deconv6_sf(h)

        # --- branch 2: corner map decoder, fed branch-1's concatenations ---
        hc = self.relu(self.deconv00_c(unpool00))
        stages_c = (self.deconv0_c, self.deconv1_c, self.deconv2_c,
            self.deconv3_c, self.deconv4_c, self.deconv5_c)
        last_c = len(stages_c) - 1
        for i, (stage, merged) in enumerate(zip(stages_c, cat_feats)):
            fused = F.interpolate(torch.cat((hc, merged), dim=1),
                scale_factor=2)
            out = stage(fused)
            hc = out if i == last_c else self.relu(out)
        corner_map = self.deconv6_sf_c(hc)

        # --- regression head from the bottleneck ---
        flat = pool7.view(-1, 2048 * 4 * 4)
        r = self.relu(self.ref1(flat))
        r = self.relu(self.ref2(r))
        r = self.relu(self.ref3(r))
        preds = self.ref4(r)
        return layout_map, corner_map, preds
def get_inputs():
    """Sample forward input: one batch of four RGB images at 256x256."""
    batch = torch.rand([4, 3, 256, 256])
    return [batch]
def get_init_inputs():
    """Constructor arguments: LayoutNet takes no args and no kwargs."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
# Short aliases for the TorchDynamo guard helpers used throughout the
# generated wrapper code (shape checks, CUDA allocation, zero-copy views).
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue for an external convolution: out = relu(conv + bias).
    # in_out_ptr0: conv output, updated in place; in_ptr0: per-channel bias.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x3 = xindex
    # Channel index for the bias: 65536 (=256*256) elements per channel
    # plane, 32 channels.
    x1 = xindex // 65536 % 32
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool. Writes the window max to out_ptr0 and the
    # winning position within the window (int8 code 0..3) to out_ptr1.
    # Offsets +1, +256, +257 walk the 2x2 window, so consecutive rows of the
    # pooled axis are 256 elements apart; x0 indexes the 128-wide output row.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x0 = xindex % 128
    x1 = xindex // 128
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 512 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (256 + 2 * x0 + 512 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (257 + 2 * x0 + 512 * x1), None,
        eviction_policy='evict_last')
    # Running max over the four window elements.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Recompute the argmax as an int8 index (0..3) for unpooling.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue for an external convolution: out = relu(conv + bias).
    # x1: channel index (16384 elements per channel plane, 64 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x3 = xindex
    x1 = xindex // 16384 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool with int8 argmax output (see kernel _1).
    # Row stride here is 128 elements; output row width is 64.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x0 = xindex % 64
    x1 = xindex // 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 256 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 256 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (128 + 2 * x0 + 256 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (129 + 2 * x0 + 256 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax code 0..3 within the 2x2 window.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue for an external convolution: out = relu(conv + bias).
    # x1: channel index (4096 elements per channel plane, 128 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x3 = xindex
    x1 = xindex // 4096 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool with int8 argmax output (see kernel _1).
    # Row stride 64 elements; output row width 32.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax code 0..3 within the 2x2 window.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue for an external convolution: out = relu(conv + bias).
    # x1: channel index (1024 elements per channel plane, 256 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x3 = xindex
    x1 = xindex // 1024 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool with int8 argmax output (see kernel _1).
    # Row stride 32 elements; output row width 16.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
        ='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
        ='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax code 0..3 within the 2x2 window.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue for an external convolution: out = relu(conv + bias).
    # x1: channel index (256 elements per channel plane, 512 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x3 = xindex
    x1 = xindex // 256 % 512
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool with int8 argmax output (see kernel _1).
    # Row stride 16 elements; output row width 8.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
        ='evict_last')
    tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
        ='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax code 0..3 within the 2x2 window.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue for an external convolution: out = relu(conv + bias).
    # x1: channel index (64 elements per channel plane, 1024 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x3 = xindex
    x1 = xindex // 64 % 1024
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool with int8 argmax output (see kernel _1).
    # Row stride 8 elements; output row width 4.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
        'evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax code 0..3 within the 2x2 window.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue for an external convolution: out = relu(conv + bias).
    # x1: channel index (16 elements per channel plane, 2048 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x3 = xindex
    x1 = xindex // 16 % 2048
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool. Row stride 4 elements; output row width 2.
    # NOTE(review): unlike the sibling pool kernels, here out_ptr0 receives
    # the int8 argmax codes and out_ptr1 the max values (outputs swapped).
    # The generated call site must pass its buffers in matching order —
    # verify against the wrapper before relying on either output.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy=
        'evict_last')
    # Interleaved argmax-code / running-max computation over the window.
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Nearest-neighbor source-index table for a 2x upsample:
    # out[i] = int32(i * 0.5) for i in [0, 4).
    xnumel = 4
    block_start = tl.program_id(0) * XBLOCK
    offsets = block_start + tl.arange(0, XBLOCK)[:]
    in_bounds = offsets < xnumel
    src_idx = (offsets.to(tl.float32) * 0.5).to(tl.int32)
    tl.store(out_ptr0 + offsets, src_idx, in_bounds)
@triton.jit
def triton_poi_fused__unsafe_index_max_pool2d_with_indices_15(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x2 max pool + nearest-neighbor 2x upsample (no argmax output).
    # in_ptr0 holds the precomputed source-index table (i // 2); in_ptr1 is
    # the pre-pool feature map. Each 4x4 output pixel gathers the max of a
    # 2x2 window chosen by the gathered row/column indices.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x2 = xindex // 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative gathered indices into [0, 2) (standard _unsafe_index
    # normalization emitted by the compiler).
    tmp1 = tl.full([XBLOCK], 2, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    # Max over the 2x2 window at (2*tmp4, 2*tmp8) in a 4-wide plane.
    tmp9 = tl.load(in_ptr1 + (2 * tmp8 + 8 * tmp4 + 16 * x2), None,
        eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 2 * tmp8 + 8 * tmp4 + 16 * x2), None,
        eviction_policy='evict_last')
    tmp11 = triton_helpers.maximum(tmp10, tmp9)
    tmp12 = tl.load(in_ptr1 + (4 + 2 * tmp8 + 8 * tmp4 + 16 * x2), None,
        eviction_policy='evict_last')
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tmp14 = tl.load(in_ptr1 + (5 + 2 * tmp8 + 8 * tmp4 + 16 * x2), None,
        eviction_policy='evict_last')
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x4, tmp15, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_16(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Nearest-neighbor source-index table for a 2x upsample:
    # out[i] = int32(i * 0.5) for i in [0, 8).
    xnumel = 8
    block_start = tl.program_id(0) * XBLOCK
    offsets = block_start + tl.arange(0, XBLOCK)[:]
    in_bounds = offsets < xnumel
    src_idx = (offsets.to(tl.float32) * 0.5).to(tl.int32)
    tl.store(out_ptr0 + offsets, src_idx, in_bounds)
@triton.jit
def triton_poi_fused__unsafe_index_cat_17(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused nearest 2x upsample of a channel concatenation:
    # out = upsample(cat(relu(conv + bias), skip)) with an 8x8 output plane,
    # 2048 output channels, and 4x4 source planes.
    # in_ptr0: source-index table (i // 2); in_ptr1: conv output (first 1024
    # channels); in_ptr2: per-channel bias; in_ptr3: skip tensor (last 1024).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x2 = xindex // 64 % 2048
    x3 = xindex // 131072
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative gathered indices into [0, 4).
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    # Channel split of the concatenation at channel 1024.
    tmp9 = x2
    tl.full([1], 0, tl.int64)  # dead bound constant left by the compiler
    tmp12 = tl.full([1], 1024, tl.int64)
    tmp13 = tmp9 < tmp12
    # First operand: conv output + bias, then ReLU (masked to channels <1024).
    tmp14 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x2 + 16384 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    # Second operand: skip tensor, channel-offset by -1024.
    tmp21 = tmp9 >= tmp12
    tl.full([1], 2048, tl.int64)  # dead bound constant left by the compiler
    tmp24 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * (-1024 + x2) + 16384 *
        x3), tmp21, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.where(tmp13, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Nearest-neighbor source-index table for a 2x upsample:
    # out[i] = int32(i * 0.5) for i in [0, 16).
    xnumel = 16
    block_start = tl.program_id(0) * XBLOCK
    offsets = block_start + tl.arange(0, XBLOCK)[:]
    in_bounds = offsets < xnumel
    src_idx = (offsets.to(tl.float32) * 0.5).to(tl.int32)
    tl.store(out_ptr0 + offsets, src_idx, in_bounds)
@triton.jit
def triton_poi_fused__unsafe_index_cat_19(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Same fusion as kernel _17 at the next decoder scale: 16x16 output
    # plane, 1024 output channels split at 512, 8x8 source planes.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # grid exactly covers xnumel; no mask
    x1 = xindex // 16 % 16
    x0 = xindex % 16
    x2 = xindex // 256 % 1024
    x3 = xindex // 262144
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative gathered indices into [0, 8).
    tmp1 = tl.full([XBLOCK], 8, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)  # dead bound constant left by the compiler
    tmp12 = tl.full([1], 512, tl.int64)
    tmp13 = tmp9 < tmp12
    # First operand: conv output + bias, then ReLU (channels < 512).
    tmp14 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x2 + 32768 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    # Second operand: skip tensor, channel-offset by -512.
    tmp21 = tmp9 >= tmp12
    tl.full([1], 1024, tl.int64)  # dead bound constant left by the compiler
    tmp24 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * (-512 + x2) + 32768 *
        x3), tmp21, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.where(tmp13, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_20(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Source-index table for a nearest-neighbor 2x upsample:
    # out[i] = int(i * 0.5) for i in [0, 32).
    xnumel = 32
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    in_bounds = offs < xnumel
    src_idx = (offs.to(tl.float32) * 0.5).to(tl.int32)
    tl.store(out_ptr0 + offs, src_idx, in_bounds)
@triton.jit
def triton_poi_fused__unsafe_index_cat_21(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + two-way channel concat.
    # Flattened output layout: batch x 512 channels x 32 x 32.
    #   channels [0, 256):   relu(in_ptr1 + per-channel bias in_ptr2),
    #                        gathered from a 16x16 source grid
    #   channels [256, 512): in_ptr3, gathered from the same 16x16 grid
    # in_ptr0 holds the precomputed source-index table (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 32 % 32  # output row
    x0 = xindex % 32  # output column
    x2 = xindex // 1024 % 512  # output channel
    x3 = xindex // 524288  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 16, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 256, tl.int64)
    tmp13 = tmp9 < tmp12  # True for the first concat half
    tmp14 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x2 + 65536 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12  # second concat half
    tl.full([1], 512, tl.int64)
    tmp24 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * (-256 + x2) + 65536 *
        x3), tmp21, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.where(tmp13, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_22(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Source-index table for a nearest-neighbor 2x upsample:
    # out[i] = int(i * 0.5) for i in [0, 64).
    xnumel = 64
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    in_bounds = offs < xnumel
    src_idx = (offs.to(tl.float32) * 0.5).to(tl.int32)
    tl.store(out_ptr0 + offs, src_idx, in_bounds)
@triton.jit
def triton_poi_fused__unsafe_index_cat_23(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + two-way channel concat.
    # Flattened output layout: batch x 256 channels x 64 x 64.
    #   channels [0, 128):   relu(in_ptr1 + per-channel bias in_ptr2),
    #                        gathered from a 32x32 source grid
    #   channels [128, 256): in_ptr3, gathered from the same 32x32 grid
    # in_ptr0 holds the precomputed source-index table (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 64 % 64  # output row
    x0 = xindex % 64  # output column
    x2 = xindex // 4096 % 256  # output channel
    x3 = xindex // 1048576  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 32, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 128, tl.int64)
    tmp13 = tmp9 < tmp12  # True for the first concat half
    tmp14 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2 + 131072 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12  # second concat half
    tl.full([1], 256, tl.int64)
    tmp24 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * (-128 + x2) +
        131072 * x3), tmp21, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.where(tmp13, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_24(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Source-index table for a nearest-neighbor 2x upsample:
    # out[i] = int(i * 0.5) for i in [0, 128).
    xnumel = 128
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    in_bounds = offs < xnumel
    src_idx = (offs.to(tl.float32) * 0.5).to(tl.int32)
    tl.store(out_ptr0 + offs, src_idx, in_bounds)
@triton.jit
def triton_poi_fused__unsafe_index_cat_25(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + two-way channel concat.
    # Flattened output layout: batch x 128 channels x 128 x 128.
    #   channels [0, 64):   relu(in_ptr1 + per-channel bias in_ptr2),
    #                       gathered from a 64x64 source grid
    #   channels [64, 128): in_ptr3, gathered from the same 64x64 grid
    # in_ptr0 holds the precomputed source-index table (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 128 % 128  # output row
    x0 = xindex % 128  # output column
    x2 = xindex // 16384 % 128  # output channel
    x3 = xindex // 2097152  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 64, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 64, tl.int64)
    tmp13 = tmp9 < tmp12  # True for the first concat half
    tmp14 = tl.load(in_ptr1 + (tmp8 + 64 * tmp4 + 4096 * x2 + 262144 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12  # second concat half
    tl.full([1], 128, tl.int64)
    tmp24 = tl.load(in_ptr3 + (tmp8 + 64 * tmp4 + 4096 * (-64 + x2) +
        262144 * x3), tmp21, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.where(tmp13, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_26(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Source-index table for a nearest-neighbor 2x upsample:
    # out[i] = int(i * 0.5) for i in [0, 256).
    xnumel = 256
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    in_bounds = offs < xnumel
    src_idx = (offs.to(tl.float32) * 0.5).to(tl.int32)
    tl.store(out_ptr0 + offs, src_idx, in_bounds)
@triton.jit
def triton_poi_fused__unsafe_index_cat_27(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + two-way channel concat.
    # Flattened output layout: batch x 64 channels x 256 x 256.
    #   channels [0, 32):  relu(in_ptr1 + per-channel bias in_ptr2),
    #                      gathered from a 128x128 source grid
    #   channels [32, 64): in_ptr3, gathered from the same 128x128 grid
    # in_ptr0 holds the precomputed source-index table (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 256 % 256  # output row
    x0 = xindex % 256  # output column
    x2 = xindex // 65536 % 64  # output channel
    x3 = xindex // 4194304  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 128, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 32, tl.int64)
    tmp13 = tmp9 < tmp12  # True for the first concat half
    tmp14 = tl.load(in_ptr1 + (tmp8 + 128 * tmp4 + 16384 * x2 + 524288 * x3
        ), tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12  # second concat half
    tl.full([1], 64, tl.int64)
    tmp24 = tl.load(in_ptr3 + (tmp8 + 128 * tmp4 + 16384 * (-32 + x2) +
        524288 * x3), tmp21, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.where(tmp13, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_28(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place per-channel bias add followed by sigmoid.
    # Channel layout: 3 channels of 65536 contiguous elements each.
    # Unmasked: the launch covers exactly xnumel elements.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 65536 % 3
    val = tl.load(in_out_ptr0 + idx, None)
    bias = tl.load(in_ptr0 + chan, None, eviction_policy='evict_last')
    tl.store(in_out_ptr0 + idx, tl.sigmoid(val + bias), None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_29(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + three-way channel concat.
    # Flattened output layout: batch x 3072 channels x 8 x 8.
    #   channels [0, 1024):    relu(in_ptr1 + per-channel bias in_ptr2)
    #   channels [1024, 2048): relu(in_ptr3 + per-channel bias in_ptr4)
    #   channels [2048, 3072): in_ptr5 (no bias, no activation)
    # All three sources are gathered from a 4x4 grid via the index table
    # in_ptr0 (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 8 % 8  # output row
    x0 = xindex % 8  # output column
    x2 = xindex // 64 % 3072  # output channel
    x3 = xindex // 196608  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 1024, tl.int64)
    tmp13 = tmp9 < tmp12  # first third of the concat
    tmp14 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x2 + 16384 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12
    tl.full([1], 3072, tl.int64)
    tmp24 = -1024 + x2  # channel index within the remaining two thirds
    tmp26 = tmp24 < tmp12  # middle third (each chunk is 1024 channels)
    tmp27 = tmp26 & tmp21
    tmp28 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * (-1024 + x2) + 16384 *
        x3), tmp27, eviction_policy='evict_last', other=0.0)
    tmp29 = tl.load(in_ptr4 + (-1024 + x2), tmp27, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 + tmp29
    tmp31 = triton_helpers.maximum(tmp17, tmp30)  # relu
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp27, tmp31, tmp32)
    tmp34 = tmp24 >= tmp12  # last third
    tl.full([1], 2048, tl.int64)
    tmp37 = tmp34 & tmp21
    tmp38 = tl.load(in_ptr5 + (tmp8 + 4 * tmp4 + 16 * (-1024 + (-1024 + x2)
        ) + 16384 * x3), tmp37, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.where(tmp26, tmp33, tmp38)
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp21, tmp39, tmp40)
    tmp42 = tl.where(tmp13, tmp20, tmp41)
    tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_30(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + three-way channel concat.
    # Flattened output layout: batch x 1536 channels x 16 x 16.
    #   channels [0, 512):     relu(in_ptr1 + per-channel bias in_ptr2)
    #   channels [512, 1024):  relu(in_ptr3 + per-channel bias in_ptr4)
    #   channels [1024, 1536): in_ptr5 (no bias, no activation)
    # All three sources are gathered from an 8x8 grid via the index table
    # in_ptr0 (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 16 % 16  # output row
    x0 = xindex % 16  # output column
    x2 = xindex // 256 % 1536  # output channel
    x3 = xindex // 393216  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 8, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 512, tl.int64)
    tmp13 = tmp9 < tmp12  # first third of the concat
    tmp14 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x2 + 32768 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12
    tl.full([1], 1536, tl.int64)
    tmp24 = -512 + x2  # channel index within the remaining two thirds
    tmp26 = tmp24 < tmp12  # middle third (each chunk is 512 channels)
    tmp27 = tmp26 & tmp21
    tmp28 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * (-512 + x2) + 32768 *
        x3), tmp27, eviction_policy='evict_last', other=0.0)
    tmp29 = tl.load(in_ptr4 + (-512 + x2), tmp27, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 + tmp29
    tmp31 = triton_helpers.maximum(tmp17, tmp30)  # relu
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp27, tmp31, tmp32)
    tmp34 = tmp24 >= tmp12  # last third
    tl.full([1], 1024, tl.int64)
    tmp37 = tmp34 & tmp21
    tmp38 = tl.load(in_ptr5 + (tmp8 + 8 * tmp4 + 64 * (-512 + (-512 + x2)) +
        32768 * x3), tmp37, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.where(tmp26, tmp33, tmp38)
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp21, tmp39, tmp40)
    tmp42 = tl.where(tmp13, tmp20, tmp41)
    tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_31(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + three-way channel concat.
    # Flattened output layout: batch x 768 channels x 32 x 32.
    #   channels [0, 256):   relu(in_ptr1 + per-channel bias in_ptr2)
    #   channels [256, 512): relu(in_ptr3 + per-channel bias in_ptr4)
    #   channels [512, 768): in_ptr5 (no bias, no activation)
    # All three sources are gathered from a 16x16 grid via the index table
    # in_ptr0 (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 32 % 32  # output row
    x0 = xindex % 32  # output column
    x2 = xindex // 1024 % 768  # output channel
    x3 = xindex // 786432  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 16, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 256, tl.int64)
    tmp13 = tmp9 < tmp12  # first third of the concat
    tmp14 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x2 + 65536 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12
    tl.full([1], 768, tl.int64)
    tmp24 = -256 + x2  # channel index within the remaining two thirds
    tmp26 = tmp24 < tmp12  # middle third (each chunk is 256 channels)
    tmp27 = tmp26 & tmp21
    tmp28 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * (-256 + x2) + 65536 *
        x3), tmp27, eviction_policy='evict_last', other=0.0)
    tmp29 = tl.load(in_ptr4 + (-256 + x2), tmp27, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 + tmp29
    tmp31 = triton_helpers.maximum(tmp17, tmp30)  # relu
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp27, tmp31, tmp32)
    tmp34 = tmp24 >= tmp12  # last third
    tl.full([1], 512, tl.int64)
    tmp37 = tmp34 & tmp21
    tmp38 = tl.load(in_ptr5 + (tmp8 + 16 * tmp4 + 256 * (-256 + (-256 + x2)
        ) + 65536 * x3), tmp37, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.where(tmp26, tmp33, tmp38)
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp21, tmp39, tmp40)
    tmp42 = tl.where(tmp13, tmp20, tmp41)
    tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_32(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + three-way channel concat.
    # Flattened output layout: batch x 384 channels x 64 x 64.
    #   channels [0, 128):   relu(in_ptr1 + per-channel bias in_ptr2)
    #   channels [128, 256): relu(in_ptr3 + per-channel bias in_ptr4)
    #   channels [256, 384): in_ptr5 (no bias, no activation)
    # All three sources are gathered from a 32x32 grid via the index table
    # in_ptr0 (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 64 % 64  # output row
    x0 = xindex % 64  # output column
    x2 = xindex // 4096 % 384  # output channel
    x3 = xindex // 1572864  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 32, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 128, tl.int64)
    tmp13 = tmp9 < tmp12  # first third of the concat
    tmp14 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2 + 131072 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12
    tl.full([1], 384, tl.int64)
    tmp24 = -128 + x2  # channel index within the remaining two thirds
    tmp26 = tmp24 < tmp12  # middle third (each chunk is 128 channels)
    tmp27 = tmp26 & tmp21
    tmp28 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * (-128 + x2) +
        131072 * x3), tmp27, eviction_policy='evict_last', other=0.0)
    tmp29 = tl.load(in_ptr4 + (-128 + x2), tmp27, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 + tmp29
    tmp31 = triton_helpers.maximum(tmp17, tmp30)  # relu
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp27, tmp31, tmp32)
    tmp34 = tmp24 >= tmp12  # last third
    tl.full([1], 256, tl.int64)
    tmp37 = tmp34 & tmp21
    tmp38 = tl.load(in_ptr5 + (tmp8 + 32 * tmp4 + 1024 * (-128 + (-128 + x2
        )) + 131072 * x3), tmp37, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.where(tmp26, tmp33, tmp38)
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp21, tmp39, tmp40)
    tmp42 = tl.where(tmp13, tmp20, tmp41)
    tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_33(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + three-way channel concat.
    # Flattened output layout: batch x 192 channels x 128 x 128.
    #   channels [0, 64):    relu(in_ptr1 + per-channel bias in_ptr2)
    #   channels [64, 128):  relu(in_ptr3 + per-channel bias in_ptr4)
    #   channels [128, 192): in_ptr5 (no bias, no activation)
    # All three sources are gathered from a 64x64 grid via the index table
    # in_ptr0 (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 128 % 128  # output row
    x0 = xindex % 128  # output column
    x2 = xindex // 16384 % 192  # output channel
    x3 = xindex // 3145728  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 64, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 64, tl.int64)
    tmp13 = tmp9 < tmp12  # first third of the concat
    tmp14 = tl.load(in_ptr1 + (tmp8 + 64 * tmp4 + 4096 * x2 + 262144 * x3),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12
    tl.full([1], 192, tl.int64)
    tmp24 = -64 + x2  # channel index within the remaining two thirds
    tmp26 = tmp24 < tmp12  # middle third (each chunk is 64 channels)
    tmp27 = tmp26 & tmp21
    tmp28 = tl.load(in_ptr3 + (tmp8 + 64 * tmp4 + 4096 * (-64 + x2) +
        262144 * x3), tmp27, eviction_policy='evict_last', other=0.0)
    tmp29 = tl.load(in_ptr4 + (-64 + x2), tmp27, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 + tmp29
    tmp31 = triton_helpers.maximum(tmp17, tmp30)  # relu
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp27, tmp31, tmp32)
    tmp34 = tmp24 >= tmp12  # last third
    tl.full([1], 128, tl.int64)
    tmp37 = tmp34 & tmp21
    tmp38 = tl.load(in_ptr5 + (tmp8 + 64 * tmp4 + 4096 * (-64 + (-64 + x2)) +
        262144 * x3), tmp37, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.where(tmp26, tmp33, tmp38)
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp21, tmp39, tmp40)
    tmp42 = tl.where(tmp13, tmp20, tmp41)
    tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_34(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused 2x nearest-neighbor upsample + three-way channel concat.
    # Flattened output layout: batch x 96 channels x 256 x 256.
    #   channels [0, 32):  relu(in_ptr1 + per-channel bias in_ptr2)
    #   channels [32, 64): relu(in_ptr3 + per-channel bias in_ptr4)
    #   channels [64, 96): in_ptr5 (no bias, no activation)
    # All three sources are gathered from a 128x128 grid via the index
    # table in_ptr0 (floor(i / 2)).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 256 % 256  # output row
    x0 = xindex % 256  # output column
    x2 = xindex // 65536 % 96  # output channel
    x3 = xindex // 6291456  # batch index
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    # Wrap negative indices (unsafe-index semantics).
    tmp1 = tl.full([XBLOCK], 128, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = x2
    tl.full([1], 0, tl.int64)
    tmp12 = tl.full([1], 32, tl.int64)
    tmp13 = tmp9 < tmp12  # first third of the concat
    tmp14 = tl.load(in_ptr1 + (tmp8 + 128 * tmp4 + 16384 * x2 + 524288 * x3
        ), tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)  # relu
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp13, tmp18, tmp19)
    tmp21 = tmp9 >= tmp12
    tl.full([1], 96, tl.int64)
    tmp24 = -32 + x2  # channel index within the remaining two thirds
    tmp26 = tmp24 < tmp12  # middle third (each chunk is 32 channels)
    tmp27 = tmp26 & tmp21
    tmp28 = tl.load(in_ptr3 + (tmp8 + 128 * tmp4 + 16384 * (-32 + x2) +
        524288 * x3), tmp27, eviction_policy='evict_last', other=0.0)
    tmp29 = tl.load(in_ptr4 + (-32 + x2), tmp27, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 + tmp29
    tmp31 = triton_helpers.maximum(tmp17, tmp30)  # relu
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp27, tmp31, tmp32)
    tmp34 = tmp24 >= tmp12  # last third
    tl.full([1], 64, tl.int64)
    tmp37 = tmp34 & tmp21
    tmp38 = tl.load(in_ptr5 + (tmp8 + 128 * tmp4 + 16384 * (-32 + (-32 + x2
        )) + 524288 * x3), tmp37, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.where(tmp26, tmp33, tmp38)
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp21, tmp39, tmp40)
    tmp42 = tl.where(tmp13, tmp20, tmp41)
    tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_35(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place per-channel bias add followed by sigmoid.
    # Channel layout: 16 channels of 65536 contiguous elements each.
    # Unmasked: the launch covers exactly xnumel elements.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 65536 % 16
    val = tl.load(in_out_ptr0 + idx, None)
    bias = tl.load(in_ptr0 + chan, None, eviction_policy='evict_last')
    tl.store(in_out_ptr0 + idx, tl.sigmoid(val + bias), None)
@triton.jit
def triton_poi_fused_relu_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place elementwise add of in_ptr0 followed by ReLU over 1024
    # elements.
    xnumel = 1024
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    mask = idx < xnumel
    acc = tl.load(in_out_ptr0 + idx, mask)
    addend = tl.load(in_ptr0 + idx, mask)
    zero = tl.full([1], 0, tl.int32)
    tl.store(in_out_ptr0 + idx, triton_helpers.maximum(zero, acc + addend),
        mask)
@triton.jit
def triton_poi_fused_relu_37(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place elementwise add of in_ptr0 followed by ReLU over 256
    # elements.
    xnumel = 256
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    mask = idx < xnumel
    acc = tl.load(in_out_ptr0 + idx, mask)
    addend = tl.load(in_ptr0 + idx, mask)
    zero = tl.full([1], 0, tl.int32)
    tl.store(in_out_ptr0 + idx, triton_helpers.maximum(zero, acc + addend),
        mask)
@triton.jit
def triton_poi_fused_relu_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place elementwise add of in_ptr0 followed by ReLU over 64
    # elements.
    xnumel = 64
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    mask = idx < xnumel
    acc = tl.load(in_out_ptr0 + idx, mask)
    addend = tl.load(in_ptr0 + idx, mask)
    zero = tl.full([1], 0, tl.int32)
    tl.store(in_out_ptr0 + idx, triton_helpers.maximum(zero, acc + addend),
        mask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_39(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Recompute per-channel bias-add + ReLU and store the clamp mask
    # (True where the activation is <= 0) for the ReLU backward pass.
    # Channel layout: 32 channels of 16384 contiguous elements each.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 16384 % 32
    val = tl.load(in_ptr0 + idx, None)
    bias = tl.load(in_ptr1 + chan, None, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    act = triton_helpers.maximum(zero, val + bias)
    tl.store(out_ptr0 + idx, act <= 0.0, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_40(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Recompute per-channel bias-add + ReLU and store the clamp mask
    # (True where the activation is <= 0) for the ReLU backward pass.
    # Channel layout: 64 channels of 4096 contiguous elements each.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 4096 % 64
    val = tl.load(in_ptr0 + idx, None)
    bias = tl.load(in_ptr1 + chan, None, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    act = triton_helpers.maximum(zero, val + bias)
    tl.store(out_ptr0 + idx, act <= 0.0, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_41(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Recompute per-channel bias-add + ReLU and store the clamp mask
    # (True where the activation is <= 0) for the ReLU backward pass.
    # Channel layout: 128 channels of 1024 contiguous elements each.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 1024 % 128
    val = tl.load(in_ptr0 + idx, None)
    bias = tl.load(in_ptr1 + chan, None, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    act = triton_helpers.maximum(zero, val + bias)
    tl.store(out_ptr0 + idx, act <= 0.0, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_42(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Recompute per-channel bias-add + ReLU and store the clamp mask
    # (True where the activation is <= 0) for the ReLU backward pass.
    # Channel layout: 256 channels of 256 contiguous elements each.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 256 % 256
    val = tl.load(in_ptr0 + idx, None)
    bias = tl.load(in_ptr1 + chan, None, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    act = triton_helpers.maximum(zero, val + bias)
    tl.store(out_ptr0 + idx, act <= 0.0, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_43(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Recompute per-channel bias-add + ReLU and store the clamp mask
    # (True where the activation is <= 0) for the ReLU backward pass.
    # Channel layout: 512 channels of 64 contiguous elements each.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 64 % 512
    val = tl.load(in_ptr0 + idx, None)
    bias = tl.load(in_ptr1 + chan, None, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    act = triton_helpers.maximum(zero, val + bias)
    tl.store(out_ptr0 + idx, act <= 0.0, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_44(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Recompute per-channel bias-add + ReLU and store the clamp mask
    # (True where the activation is <= 0) for the ReLU backward pass.
    # Channel layout: 1024 channels of 16 contiguous elements each.
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    chan = idx // 16 % 1024
    val = tl.load(in_ptr0 + idx, None)
    bias = tl.load(in_ptr1 + chan, None, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    act = triton_helpers.maximum(zero, val + bias)
    tl.store(out_ptr0 + idx, act <= 0.0, None)
def call(args):
    """Inductor-generated forward graph for LayoutNet.

    args holds 51 tensors: the input image (primals_3, (4, 3, 256, 256))
    interleaved with every conv/linear weight and bias in registration
    order. The list is cleared in place.

    Returns a long tuple whose first three entries are the network
    outputs — buf48 (sigmoid 3-channel map, presumably the layout edge
    map), buf62 (sigmoid 16-channel map, presumably corners), buf70
    (1x11 FC logits) — followed by the tensors autograd needs for the
    backward pass (weights, intermediate activations, pooling indices,
    upsample index vectors and ReLU masks buf71..buf82).

    NOTE(review): statement order and the del/buffer-reuse pattern are
    load-bearing (Inductor manages lifetimes explicitly); do not reorder.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25, primals_26, primals_27,
        primals_28, primals_29, primals_30, primals_31, primals_32,
        primals_33, primals_34, primals_35, primals_36, primals_37,
        primals_38, primals_39, primals_40, primals_41, primals_42,
        primals_43, primals_44, primals_45, primals_46, primals_47,
        primals_48, primals_49, primals_50, primals_51) = args
    args.clear()
    # Shape/stride guards for every parameter and the input image.
    assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
    assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_11, (512,), (1,))
    assert_size_stride(primals_12, (1024, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_13, (1024,), (1,))
    assert_size_stride(primals_14, (2048, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_15, (2048,), (1,))
    assert_size_stride(primals_16, (1024, 2048, 3, 3), (18432, 9, 3, 1))
    assert_size_stride(primals_17, (1024,), (1,))
    assert_size_stride(primals_18, (512, 2048, 3, 3), (18432, 9, 3, 1))
    assert_size_stride(primals_19, (512,), (1,))
    assert_size_stride(primals_20, (256, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_21, (256,), (1,))
    assert_size_stride(primals_22, (128, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (128,), (1,))
    assert_size_stride(primals_24, (64, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_25, (64,), (1,))
    assert_size_stride(primals_26, (32, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_27, (32,), (1,))
    assert_size_stride(primals_28, (3, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_29, (3,), (1,))
    assert_size_stride(primals_30, (1024, 2048, 3, 3), (18432, 9, 3, 1))
    assert_size_stride(primals_31, (1024,), (1,))
    assert_size_stride(primals_32, (512, 3072, 3, 3), (27648, 9, 3, 1))
    assert_size_stride(primals_33, (512,), (1,))
    assert_size_stride(primals_34, (256, 1536, 3, 3), (13824, 9, 3, 1))
    assert_size_stride(primals_35, (256,), (1,))
    assert_size_stride(primals_36, (128, 768, 3, 3), (6912, 9, 3, 1))
    assert_size_stride(primals_37, (128,), (1,))
    assert_size_stride(primals_38, (64, 384, 3, 3), (3456, 9, 3, 1))
    assert_size_stride(primals_39, (64,), (1,))
    assert_size_stride(primals_40, (32, 192, 3, 3), (1728, 9, 3, 1))
    assert_size_stride(primals_41, (32,), (1,))
    assert_size_stride(primals_42, (16, 96, 3, 3), (864, 9, 3, 1))
    assert_size_stride(primals_43, (16,), (1,))
    assert_size_stride(primals_44, (1024, 32768), (32768, 1))
    assert_size_stride(primals_45, (1024,), (1,))
    assert_size_stride(primals_46, (256, 1024), (1024, 1))
    assert_size_stride(primals_47, (256,), (1,))
    assert_size_stride(primals_48, (64, 256), (256, 1))
    assert_size_stride(primals_49, (64,), (1,))
    assert_size_stride(primals_50, (11, 64), (64, 1))
    assert_size_stride(primals_51, (11,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # ---- Encoder: conv + fused bias/ReLU + 2x2 max-pool, 7 stages ----
        # Stage 1: conv1 (3->32) at 256x256.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 256, 256), (2097152, 65536, 256, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(8388608)](buf1, primals_2,
            8388608, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1
            ), torch.float32)
        buf3 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1
            ), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(2097152)](buf1,
            buf2, buf3, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 2: conv2 (32->64) at 128x128.
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 64, 128, 128), (1048576, 16384, 128, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(4194304)](buf5, primals_5,
            4194304, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
            torch.float32)
        buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(1048576)](buf5,
            buf6, buf7, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 3: conv3 (64->128) at 64x64.
        buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_relu_4[grid(2097152)](buf9, primals_7,
            2097152, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_7
        buf10 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
            torch.float32)
        buf11 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_5[grid(524288)](buf9,
            buf10, buf11, 524288, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 4: conv4 (128->256) at 32x32.
        buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 256, 32, 32), (262144, 1024, 32, 1))
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_relu_6[grid(1048576)](buf13, primals_9,
            1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
            torch.float32)
        buf15 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_7[grid(262144)](buf13,
            buf14, buf15, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 5: conv5 (256->512) at 16x16.
        buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 512, 16, 16), (131072, 256, 16, 1))
        buf17 = buf16
        del buf16
        triton_poi_fused_convolution_relu_8[grid(524288)](buf17, primals_11,
            524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_11
        buf18 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
            .float32)
        buf19 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
            .int8)
        triton_poi_fused_max_pool2d_with_indices_9[grid(131072)](buf17,
            buf18, buf19, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 6: conv6 (512->1024) at 8x8.
        buf20 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 1024, 8, 8), (65536, 64, 8, 1))
        buf21 = buf20
        del buf20
        triton_poi_fused_convolution_relu_10[grid(262144)](buf21,
            primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_13
        buf22 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
            torch.float32)
        buf23 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_11[grid(65536)](buf21,
            buf22, buf23, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        # Stage 7 (bottleneck): conv7 (1024->2048) at 4x4.
        buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 2048, 4, 4), (32768, 16, 4, 1))
        buf25 = buf24
        del buf24
        triton_poi_fused_convolution_relu_12[grid(131072)](buf25,
            primals_15, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_15
        # Final pool to 2x2; buf63 is the float output later flattened
        # for the FC head, buf26 keeps the argmax indices for backward.
        buf26 = empty_strided_cuda((4, 2048, 2, 2), (8192, 4, 2, 1), torch.int8
            )
        buf63 = empty_strided_cuda((4, 2048, 2, 2), (8192, 4, 2, 1), torch.
            float32)
        triton_poi_fused_max_pool2d_with_indices_13[grid(32768)](buf25,
            buf26, buf63, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        # ---- Decoder branch 1: nearest-neighbor upsample + skip concat ----
        # buf27/buf30/buf33/... hold the integer source-row indices used
        # by the nearest-neighbor upsampling kernels at each resolution.
        buf27 = empty_strided_cuda((4,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_14[grid(4)](buf27, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf28 = empty_strided_cuda((4, 2048, 4, 4), (32768, 16, 4, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_max_pool2d_with_indices_15[grid(131072)
            ](buf27, buf25, buf28, 131072, XBLOCK=512, num_warps=8,
            num_stages=1)
        buf29 = extern_kernels.convolution(buf28, primals_16, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 1024, 4, 4), (16384, 16, 4, 1))
        buf30 = empty_strided_cuda((8,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_16[grid(8)](buf30, 8,
            XBLOCK=8, num_warps=1, num_stages=1)
        # Upsample to 8x8 and concat with encoder skip buf22 -> 2048 ch.
        buf31 = empty_strided_cuda((4, 2048, 8, 8), (131072, 64, 8, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_17[grid(524288)](buf30, buf29,
            primals_17, buf22, buf31, 524288, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf32, (4, 512, 8, 8), (32768, 64, 8, 1))
        buf33 = empty_strided_cuda((16,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_18[grid(16)](buf33, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        # Upsample to 16x16 and concat with skip buf18 -> 1024 ch.
        buf34 = empty_strided_cuda((4, 1024, 16, 16), (262144, 256, 16, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_19[grid(1048576)](buf33, buf32,
            primals_19, buf18, buf34, 1048576, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf35 = extern_kernels.convolution(buf34, primals_20, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf36 = empty_strided_cuda((32,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_20[grid(32)](buf36, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        # Upsample to 32x32 and concat with skip buf14 -> 512 ch.
        buf37 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_21[grid(2097152)](buf36, buf35,
            primals_21, buf14, buf37, 2097152, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf38 = extern_kernels.convolution(buf37, primals_22, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf39 = empty_strided_cuda((64,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_22[grid(64)](buf39, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        # Upsample to 64x64 and concat with skip buf10 -> 256 ch.
        buf40 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_23[grid(4194304)](buf39, buf38,
            primals_23, buf10, buf40, 4194304, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf41 = extern_kernels.convolution(buf40, primals_24, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf41, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf42 = empty_strided_cuda((128,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_24[grid(128)](buf42, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
        # Upsample to 128x128 and concat with skip buf6 -> 128 ch.
        buf43 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128,
            1), torch.float32)
        triton_poi_fused__unsafe_index_cat_25[grid(8388608)](buf42, buf41,
            primals_25, buf6, buf43, 8388608, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf44 = extern_kernels.convolution(buf43, primals_26, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf44, (4, 32, 128, 128), (524288, 16384, 128, 1))
        buf45 = empty_strided_cuda((256,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_26[grid(256)](buf45, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        # Upsample to 256x256 and concat with skip buf2 -> 64 ch.
        buf46 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256,
            1), torch.float32)
        triton_poi_fused__unsafe_index_cat_27[grid(16777216)](buf45, buf44,
            primals_27, buf2, buf46, 16777216, XBLOCK=1024, num_warps=4,
            num_stages=1)
        # deconv5 + fused bias/sigmoid: first network output (buf48).
        buf47 = extern_kernels.convolution(buf46, primals_28, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 3, 256, 256), (196608, 65536, 256, 1))
        buf48 = buf47
        del buf47
        triton_poi_fused_convolution_sigmoid_28[grid(786432)](buf48,
            primals_29, 786432, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_29
        # ---- Decoder branch 2 ("_c"): 3-way concats (branch-1 features,
        # own features, encoder skips) at each resolution ----
        buf49 = extern_kernels.convolution(buf28, primals_30, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf49, (4, 1024, 4, 4), (16384, 16, 4, 1))
        buf50 = empty_strided_cuda((4, 3072, 8, 8), (196608, 64, 8, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_29[grid(786432)](buf30, buf49,
            primals_31, buf29, primals_17, buf22, buf50, 786432, XBLOCK=
            1024, num_warps=4, num_stages=1)
        buf51 = extern_kernels.convolution(buf50, primals_32, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 512, 8, 8), (32768, 64, 8, 1))
        buf52 = empty_strided_cuda((4, 1536, 16, 16), (393216, 256, 16, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_30[grid(1572864)](buf33, buf51,
            primals_33, buf32, primals_19, buf18, buf52, 1572864, XBLOCK=
            1024, num_warps=4, num_stages=1)
        buf53 = extern_kernels.convolution(buf52, primals_34, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf54 = empty_strided_cuda((4, 768, 32, 32), (786432, 1024, 32, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_31[grid(3145728)](buf36, buf53,
            primals_35, buf35, primals_21, buf14, buf54, 3145728, XBLOCK=
            1024, num_warps=4, num_stages=1)
        buf55 = extern_kernels.convolution(buf54, primals_36, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf55, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf56 = empty_strided_cuda((4, 384, 64, 64), (1572864, 4096, 64, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_cat_32[grid(6291456)](buf39, buf55,
            primals_37, buf38, primals_23, buf10, buf56, 6291456, XBLOCK=
            1024, num_warps=4, num_stages=1)
        buf57 = extern_kernels.convolution(buf56, primals_38, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf57, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf58 = empty_strided_cuda((4, 192, 128, 128), (3145728, 16384, 128,
            1), torch.float32)
        triton_poi_fused__unsafe_index_cat_33[grid(12582912)](buf42, buf57,
            primals_39, buf41, primals_25, buf6, buf58, 12582912, XBLOCK=
            1024, num_warps=4, num_stages=1)
        buf59 = extern_kernels.convolution(buf58, primals_40, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf59, (4, 32, 128, 128), (524288, 16384, 128, 1))
        buf60 = empty_strided_cuda((4, 96, 256, 256), (6291456, 65536, 256,
            1), torch.float32)
        triton_poi_fused__unsafe_index_cat_34[grid(25165824)](buf45, buf59,
            primals_41, buf44, primals_27, buf2, buf60, 25165824, XBLOCK=
            1024, num_warps=4, num_stages=1)
        # deconv5_c + fused bias/sigmoid: second network output (buf62).
        buf61 = extern_kernels.convolution(buf60, primals_42, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf61, (4, 16, 256, 256), (1048576, 65536, 256, 1))
        buf62 = buf61
        del buf61
        triton_poi_fused_convolution_sigmoid_35[grid(4194304)](buf62,
            primals_43, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_43
        # ---- FC head on the flattened 2x2 bottleneck (buf63) ----
        # NOTE(review): buf63 is viewed as (1, 32768) with a zero batch
        # stride, i.e. only one flattened row feeds the head.
        buf64 = empty_strided_cuda((1, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf63, (1, 32768), (0, 1), 0),
            reinterpret_tensor(primals_44, (32768, 1024), (1, 32768), 0),
            out=buf64)
        buf65 = buf64
        del buf64
        triton_poi_fused_relu_36[grid(1024)](buf65, primals_45, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_45
        buf66 = empty_strided_cuda((1, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf65, reinterpret_tensor(primals_46, (1024, 256),
            (1, 1024), 0), out=buf66)
        buf67 = buf66
        del buf66
        triton_poi_fused_relu_37[grid(256)](buf67, primals_47, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_47
        buf68 = empty_strided_cuda((1, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf67, reinterpret_tensor(primals_48, (256, 64),
            (1, 256), 0), out=buf68)
        buf69 = buf68
        del buf68
        triton_poi_fused_relu_38[grid(64)](buf69, primals_49, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_49
        # Final linear (ref4): third network output, the 11-way logits.
        buf70 = empty_strided_cuda((1, 11), (11, 1), torch.float32)
        extern_kernels.addmm(primals_51, buf69, reinterpret_tensor(
            primals_50, (64, 11), (1, 64), 0), alpha=1, beta=1, out=buf70)
        del primals_51
        # ---- Boolean ReLU masks for the backward pass (branch 2 then
        # branch 1, finest to coarsest resolution) ----
        buf71 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128,
            1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_39[grid(2097152)](
            buf59, primals_41, buf71, 2097152, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf59
        del primals_41
        buf72 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_40[grid(1048576)](
            buf57, primals_39, buf72, 1048576, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf57
        del primals_39
        buf73 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_41[grid(524288)](
            buf55, primals_37, buf73, 524288, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf55
        del primals_37
        buf74 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_42[grid(262144)](
            buf53, primals_35, buf74, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf53
        del primals_35
        buf75 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
            .bool)
        triton_poi_fused_convolution_relu_threshold_backward_43[grid(131072)](
            buf51, primals_33, buf75, 131072, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf51
        del primals_33
        buf76 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_44[grid(65536)](
            buf49, primals_31, buf76, 65536, XBLOCK=512, num_warps=4,
            num_stages=1)
        del buf49
        del primals_31
        buf77 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128,
            1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_39[grid(2097152)](
            buf44, primals_27, buf77, 2097152, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf44
        del primals_27
        buf78 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_40[grid(1048576)](
            buf41, primals_25, buf78, 1048576, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf41
        del primals_25
        buf79 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_41[grid(524288)](
            buf38, primals_23, buf79, 524288, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf38
        del primals_23
        buf80 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_42[grid(262144)](
            buf35, primals_21, buf80, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf35
        del primals_21
        buf81 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
            .bool)
        triton_poi_fused_convolution_relu_threshold_backward_43[grid(131072)](
            buf32, primals_19, buf81, 131072, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf32
        del primals_19
        buf82 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_44[grid(65536)](
            buf29, primals_17, buf82, 65536, XBLOCK=512, num_warps=4,
            num_stages=1)
        del buf29
        del primals_17
    # Outputs first, then everything autograd needs for backward.
    return (buf48, buf62, buf70, primals_1, primals_3, primals_4, primals_6,
        primals_8, primals_10, primals_12, primals_14, primals_16,
        primals_18, primals_20, primals_22, primals_24, primals_26,
        primals_28, primals_30, primals_32, primals_34, primals_36,
        primals_38, primals_40, primals_42, buf1, buf2, buf3, buf5, buf6,
        buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19,
        buf21, buf22, buf23, buf25, buf26, buf27, buf28, buf30, buf31,
        buf33, buf34, buf36, buf37, buf39, buf40, buf42, buf43, buf45,
        buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf62,
        reinterpret_tensor(buf63, (1, 32768), (32768, 1), 0), buf65, buf67,
        buf69, primals_50, primals_48, primals_46, primals_44, buf71, buf72,
        buf73, buf74, buf75, buf76, buf77, buf78, buf79, buf80, buf81, buf82)
class LayoutNetNew(nn.Module):
    """LayoutNet with an Inductor-compiled forward pass.

    Registers the same parameters as the original LayoutNet — a seven-stage
    conv encoder, two skip-connected decoder branches and a four-layer fully
    connected head — and delegates all computation to the generated
    ``call()`` graph. ``forward`` returns the graph's first three outputs.
    """

    def __init__(self):
        super(LayoutNetNew, self).__init__()
        # Encoder: seven 3x3 stride-1 convs; channel width doubles per stage.
        # Registration names/order (conv1..conv7) match the original model.
        enc = [3, 32, 64, 128, 256, 512, 1024, 2048]
        for i in range(7):
            setattr(self, 'conv%d' % (i + 1), nn.Conv2d(enc[i], enc[i + 1],
                kernel_size=3, padding=1, stride=1))
        # Decoder branch 1: bottleneck conv, then convs over 2x-width
        # (skip-concatenated) inputs down to a 3-channel map.
        self.deconv00 = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
            stride=1)
        widths = [1024, 512, 256, 128, 64, 32]
        outs = [512, 256, 128, 64, 32, 3]
        for i, (w, o) in enumerate(zip(widths, outs)):
            setattr(self, 'deconv%d' % i, nn.Conv2d(2 * w, o,
                kernel_size=3, padding=1, stride=1))
        self.deconv6_sf = nn.Sigmoid()
        # Decoder branch 2 ("_c"): same layout but 3x-width concatenated
        # inputs, ending in a 16-channel map.
        self.deconv00_c = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
            stride=1)
        outs_c = [512, 256, 128, 64, 32, 16]
        for i, (w, o) in enumerate(zip(widths, outs_c)):
            setattr(self, 'deconv%d_c' % i, nn.Conv2d(3 * w, o,
                kernel_size=3, padding=1, stride=1))
        self.deconv6_sf_c = nn.Sigmoid()
        # Fully connected regression head on the flattened 2048x4x4 bottleneck.
        self.ref1 = nn.Linear(2048 * 4 * 4, 1024)
        self.ref2 = nn.Linear(1024, 256)
        self.ref3 = nn.Linear(256, 64)
        self.ref4 = nn.Linear(64, 11)
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, input_0):
        # Layers in the exact order call() expects their weight/bias pairs.
        layers = [self.conv1, self.conv2, self.conv3, self.conv4,
                  self.conv5, self.conv6, self.conv7, self.deconv00,
                  self.deconv0, self.deconv1, self.deconv2, self.deconv3,
                  self.deconv4, self.deconv5, self.deconv00_c,
                  self.deconv0_c, self.deconv1_c, self.deconv2_c,
                  self.deconv3_c, self.deconv4_c, self.deconv5_c,
                  self.ref1, self.ref2, self.ref3, self.ref4]
        params = []
        for layer in layers:
            params.append(layer.weight)
            params.append(layer.bias)
        # The input image goes third, between conv1's bias and conv2's weight.
        output = call(params[:2] + [input_0] + params[2:])
        return output[0], output[1], output[2]
|
wellowdata/pytorch-layoutnet
|
LayoutNet
| false
| 16,857
|
[
"MIT"
] | 155
|
3d4352f94ed00d3c37890e9119452811d4f0893f
|
https://github.com/wellowdata/pytorch-layoutnet/tree/3d4352f94ed00d3c37890e9119452811d4f0893f
|
ClassNetVideoConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Unit3D(nn.Module):
    """Basic unit containing Conv3D + BatchNorm + non-linearity."""

    def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1),
        stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=
        True, use_bias=False, name='unit_3d'):
        """Initializes Unit3D module.

        Args:
            in_channels: number of input channels.
            output_channels: number of output channels.
            kernel_shape: conv kernel size per (t, h, w) dimension.
            stride: conv stride per (t, h, w) dimension.
            padding: stored on the instance but not passed to Conv3d —
                padding is computed dynamically in forward() instead.
            activation_fn: callable applied after conv/BN, or None.
            use_batch_norm: whether to append a BatchNorm3d layer.
            use_bias: whether the Conv3d has a bias term.
            name: informational label for the unit.
        """
        super(Unit3D, self).__init__()
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._stride = stride
        self._use_batch_norm = use_batch_norm
        self._activation_fn = activation_fn
        self._use_bias = use_bias
        self.name = name
        self.padding = padding
        # padding=0 here on purpose: 'SAME'-style padding is applied
        # manually in forward() so it can adapt to the input size.
        self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self.
            _output_channels, kernel_size=self._kernel_shape, stride=self.
            _stride, padding=0, bias=self._use_bias)
        if self._use_batch_norm:
            self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001,
                momentum=0.01)

    def compute_pad(self, dim, s):
        """Get the zero padding number.

        Total padding along dimension *dim* (0=t, 1=h, 2=w) needed for an
        input extent *s* so the conv output covers the input 'SAME'-style.
        """
        if s % self._stride[dim] == 0:
            return max(self._kernel_shape[dim] - self._stride[dim], 0)
        else:
            return max(self._kernel_shape[dim] - s % self._stride[dim], 0)

    def forward(self, x):
        """
        Connects the module to inputs. Dynamically pad based on input size in forward function.

        Args:
            x: Inputs to the Unit3D component, shape (batch, channel, t, h, w).

        Returns:
            Outputs from the module.
        """
        _batch, _channel, time, height, width = x.size()
        pad_t = self.compute_pad(0, time)
        pad_h = self.compute_pad(1, height)
        pad_w = self.compute_pad(2, width)
        # Split each total symmetrically; the back side gets the odd unit.
        pad_t_front = pad_t // 2
        pad_t_back = pad_t - pad_t_front
        pad_h_front = pad_h // 2
        pad_h_back = pad_h - pad_h_front
        pad_w_front = pad_w // 2
        pad_w_back = pad_w - pad_w_front
        # F.pad expects (w_front, w_back, h_front, h_back, t_front, t_back).
        pad = (pad_w_front, pad_w_back, pad_h_front, pad_h_back,
            pad_t_front, pad_t_back)
        x = F.pad(x, pad)
        x = self.conv3d(x)
        if self._use_batch_norm:
            x = self.bn(x)
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x
class ClassNetVideoConv(nn.Module):
    """Classifier network for video input refer to MMSADA.

    Dropout followed by a 1x1x1 Conv3d head (no BN, no activation) that
    maps per-position features to class logits.

    Args:
        input_size (int, optional): the dimension of the final feature vector. Defaults to 1024.
        n_class (int, optional): the number of classes. Defaults to 8.

    References:
        Munro Jonathan, and Dima Damen. "Multi-modal domain adaptation for fine-grained action recognition."
        In CVPR, pp. 122-132. 2020.
    """

    def __init__(self, input_size=1024, n_class=8):
        super(ClassNetVideoConv, self).__init__()
        self.dp = nn.Dropout()
        self.logits = Unit3D(
            in_channels=input_size,
            output_channels=n_class,
            kernel_shape=[1, 1, 1],
            padding=0,
            activation_fn=None,
            use_batch_norm=False,
            use_bias=True,
        )

    def forward(self, input):
        # Dropout, then the conv head.
        return self.logits(self.dp(input))
def get_inputs():
    # Benchmark fixture: a single positional input for ClassNetVideoConv —
    # batch of 4, 1024 channels (the default input_size), 64x64x64
    # spatio-temporal grid. NOTE: ~4 GiB of fp32, allocated on every call.
    return [torch.rand([4, 1024, 64, 64, 64])]
def get_init_inputs():
    # Constructor fixture: no positional args, no keyword args —
    # ClassNetVideoConv is built with its defaults.
    return [list(), dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # Generated kernel: adds the per-channel conv bias in place.
    # in_out_ptr0: conv output (4, 8, 64, 64, 64); in_ptr0: bias (8,).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # All lanes active: xnumel is an exact multiple of XBLOCK at launch.
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    # Channel index: 64*64*64 = 262144 elements per channel, 8 channels.
    x1 = xindex // 262144 % 8
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
    """Inductor-generated forward for ClassNetVideoConv's 1x1x1 Conv3d.

    args: [input (4, 1024, 64, 64, 64), conv weight (8, 1024, 1, 1, 1),
    conv bias (8,)]; the list is cleared in place. Runs a bias-less cuDNN
    convolution, then fuses the bias add in with a Triton kernel.

    Returns (conv output, input, weight) — the extra tensors are kept for
    the backward pass.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1024, 64, 64, 64), (268435456, 262144,
        4096, 64, 1))
    assert_size_stride(primals_2, (8, 1024, 1, 1, 1), (1024, 1, 1, 1, 1))
    assert_size_stride(primals_3, (8,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 8, 64, 64, 64), (2097152, 262144, 4096,
            64, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # In-place bias add over all 8,388,608 output elements.
        triton_poi_fused_convolution_0[grid(8388608)](buf1, primals_3,
            8388608, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2
class Unit3D(nn.Module):
    """Conv3D unit with 'SAME'-style dynamic padding plus optional
    BatchNorm and activation."""

    def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1),
        stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=
        True, use_bias=False, name='unit_3d'):
        """Build the Conv3d (its padding is 0 on purpose — padding is
        computed per input in forward()) and the optional BatchNorm3d."""
        super(Unit3D, self).__init__()
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._stride = stride
        self._use_batch_norm = use_batch_norm
        self._activation_fn = activation_fn
        self._use_bias = use_bias
        self.name = name
        self.padding = padding  # informational only; not used by conv3d
        self.conv3d = nn.Conv3d(in_channels=in_channels,
            out_channels=output_channels, kernel_size=kernel_shape,
            stride=stride, padding=0, bias=use_bias)
        if use_batch_norm:
            self.bn = nn.BatchNorm3d(output_channels, eps=0.001,
                momentum=0.01)

    def compute_pad(self, dim, s):
        """Total zero padding along *dim* (0=t, 1=h, 2=w) for extent *s*."""
        k = self._kernel_shape[dim]
        st = self._stride[dim]
        rem = s % st
        # Same rule as TensorFlow 'SAME' padding: cover the kernel overhang.
        return max(k - (st if rem == 0 else rem), 0)

    def forward(self, x):
        """Pad the input per-dimension, then apply conv / BN / activation.

        Args:
            x: input tensor of shape (batch, channel, t, h, w).

        Returns:
            The transformed tensor.
        """
        _batch, _channel, time, height, width = x.size()
        # F.pad expects (w_front, w_back, h_front, h_back, t_front, t_back);
        # each total is split symmetrically, odd unit goes to the back.
        pad = []
        for dim, size in ((2, width), (1, height), (0, time)):
            total = self.compute_pad(dim, size)
            front = total // 2
            pad.extend((front, total - front))
        x = F.pad(x, pad)
        x = self.conv3d(x)
        if self._use_batch_norm:
            x = self.bn(x)
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x
class ClassNetVideoConvNew(nn.Module):
    """Classifier network for video input refer to MMSADA, with an
    Inductor-compiled forward (dropout is not applied in this path).

    Args:
        input_size (int, optional): the dimension of the final feature vector. Defaults to 1024.
        n_class (int, optional): the number of classes. Defaults to 8.

    References:
        Munro Jonathan, and Dima Damen. "Multi-modal domain adaptation for fine-grained action recognition."
        In CVPR, pp. 122-132. 2020.
    """

    def __init__(self, input_size=1024, n_class=8):
        super(ClassNetVideoConvNew, self).__init__()
        self.dp = nn.Dropout()
        self.logits = Unit3D(
            in_channels=input_size,
            output_channels=n_class,
            kernel_shape=[1, 1, 1],
            padding=0,
            activation_fn=None,
            use_batch_norm=False,
            use_bias=True,
        )

    def forward(self, input_0):
        # call() expects [input, conv weight, conv bias] and returns the
        # biased conv output first.
        conv = self.logits.conv3d
        output = call([input_0, conv.weight, conv.bias])
        return output[0]
|
SheffieldAI/pykale
|
ClassNetVideoConv
| false
| 16,858
|
[
"MIT"
] | 324
|
be7670941fb06835883c80477b26702d407017db
|
https://github.com/SheffieldAI/pykale/tree/be7670941fb06835883c80477b26702d407017db
|
single_param
|
import torch
import torch.nn as nn
import torch.distributions
class single_param(nn.Module):
    """A single learnable scalar, exposed as its absolute value."""

    def __init__(self, value):
        super(single_param, self).__init__()
        # One-element parameter tensor initialised to the given value.
        self.p = nn.Parameter(torch.FloatTensor([value]))

    def forward(self):
        # Constrain the reported value to be non-negative.
        return self.p.abs()
def get_inputs():
    # The module's forward takes no tensor arguments.
    return list()
def get_init_inputs():
    # No positional args; the constructor only needs the initial value.
    return [[], dict(value=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise |x| for a single-element tensor: one program loads the
    # scalar at offset 0 and stores its absolute value back at offset 0.
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:]  # codegen residue; result intentionally unused
    tl.full([XBLOCK], True, tl.int1)  # codegen residue; all lanes in-bounds
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl_math.abs(tmp1)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None)
def call(args):
    """Inductor entry point: abs() of a 1-element CUDA tensor.

    Args:
        args: list containing the parameter tensor of shape (1,); cleared in place.

    Returns:
        (result_buffer, original_parameter) — the second element is kept for autograd.
    """
    primals_1, = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Output buffer for the single absolute value.
        buf0 = empty_strided_cuda((1,), (1,), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_0[grid(1)](primals_1, buf0, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
    return buf0, primals_1
class single_paramNew(nn.Module):
    """Triton-backed variant of ``single_param``: a learnable scalar whose
    absolute value is computed by the compiled ``call`` kernel."""

    def __init__(self, value):
        super(single_paramNew, self).__init__()
        self.p = nn.Parameter(torch.FloatTensor([value]))

    def forward(self):
        result = call([self.p])
        return result[0]
|
AaltoML/PeriodicBNN
|
single_param
| false
| 16,859
|
[
"MIT"
] | 9
|
1638edb365641e7fe2ea2ab3c15b9439473f9cf3
|
https://github.com/AaltoML/PeriodicBNN/tree/1638edb365641e7fe2ea2ab3c15b9439473f9cf3
|
VertexDirectEmbedder
|
import torch
import torch.utils.data
from torch import nn
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
    ) ->torch.Tensor:
    """
    Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): minimum value for a vector norm
    Return:
        Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
    """
    row_norms = embeddings.norm(p=None, dim=1, keepdim=True)
    # Clamp guards against division by (near-)zero rows.
    return embeddings / row_norms.clamp(min=epsilon)
class VertexDirectEmbedder(nn.Module):
    """
    Class responsible for embedding vertices. Vertex embeddings take
    the form of a tensor of size [N, D], where
        N = number of vertices
        D = number of dimensions in the embedding space
    """

    def __init__(self, num_vertices: 'int', embed_dim: 'int'):
        """
        Initialize embedder, set random embeddings
        Args:
            num_vertices (int): number of vertices to embed
            embed_dim (int): number of dimensions in the embedding space
        """
        super(VertexDirectEmbedder, self).__init__()
        self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
        self.reset_parameters()

    @torch.no_grad()
    def reset_parameters(self):
        """Re-draw every embedding coordinate uniformly from [-0.5, 0.5]."""
        torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)

    def forward(self) ->torch.Tensor:
        """
        Produce vertex embeddings, a tensor of shape [N, D] where:
            N = number of vertices
            D = number of dimensions in the embedding space
        Return:
            Full vertex embeddings, a tensor of shape [N, D], L2-normalized per row.
        """
        return normalize_embeddings(self.embeddings)

    @torch.no_grad()
    def load(self, fpath: 'str'):
        """
        Load data from a file
        Args:
            fpath (str): file path to load data from
        """
        # NOTE(review): relies on PathManager/pickle being available in the
        # enclosing module scope — confirm imports at the file top.
        with PathManager.open(fpath, 'rb') as stream:
            payload = pickle.load(stream)
            for name in ['embeddings']:
                if name in payload:
                    getattr(self, name).copy_(torch.tensor(payload[name]).float())
def get_inputs():
    # The embedder's forward takes no tensor arguments.
    return list()
def get_init_inputs():
    # No positional args; constructor needs vertex count and embedding width.
    return [[], dict(num_vertices=4, embed_dim=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Fused row-wise L2 normalization for a contiguous (4, 4) tensor:
    # out[i, j] = x[i, j] / max(||x[i, :]||_2, 1e-6).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index of each flattened element
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Reload the four elements of the row to accumulate its squared norm.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    # Clamp the norm from below (epsilon = 1e-6) before dividing.
    tmp13 = 1e-06
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
    """Inductor entry point: row-normalize a (4, 4) CUDA embedding matrix.

    Args:
        args: list containing the embeddings parameter, shape (4, 4); cleared in place.

    Returns:
        (normalized_buffer, original_parameter) — the second element is kept for autograd.
    """
    primals_1, = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        # One program of 16 lanes covers the whole 4x4 tensor.
        triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(16)](primals_1,
            buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf0, primals_1
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
    ) ->torch.Tensor:
    """
    Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): minimum value for a vector norm
    Return:
        Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
    """
    row_norms = embeddings.norm(p=None, dim=1, keepdim=True)
    # Clamp guards against division by (near-)zero rows.
    return embeddings / row_norms.clamp(min=epsilon)
class VertexDirectEmbedderNew(nn.Module):
    """
    Triton-backed vertex embedder. Vertex embeddings take the form of a
    tensor of size [N, D], where
        N = number of vertices
        D = number of dimensions in the embedding space
    Forward delegates the row-wise normalization to the compiled ``call``.
    """

    def __init__(self, num_vertices: 'int', embed_dim: 'int'):
        """
        Initialize embedder, set random embeddings
        Args:
            num_vertices (int): number of vertices to embed
            embed_dim (int): number of dimensions in the embedding space
        """
        super(VertexDirectEmbedderNew, self).__init__()
        self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
        self.reset_parameters()

    @torch.no_grad()
    def reset_parameters(self):
        """Re-draw every embedding coordinate uniformly from [-0.5, 0.5]."""
        torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)

    @torch.no_grad()
    def load(self, fpath: 'str'):
        """
        Load data from a file
        Args:
            fpath (str): file path to load data from
        """
        # NOTE(review): relies on PathManager/pickle from the enclosing module
        # scope — confirm imports at the file top.
        with PathManager.open(fpath, 'rb') as stream:
            payload = pickle.load(stream)
            for name in ['embeddings']:
                if name in payload:
                    getattr(self, name).copy_(torch.tensor(payload[name]).float())

    def forward(self):
        result = call([self.embeddings])
        return result[0]
|
AbirKhan96/facebook-detectron2
|
VertexDirectEmbedder
| false
| 16,860
|
[
"Apache-2.0"
] | 5
|
6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
IIDIsotropicGaussianUVLoss
|
import math
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class IIDIsotropicGaussianUVLoss(nn.Module):
    """
    Loss for the case of iid residuals with isotropic covariance:
    $Sigma_i = sigma_i^2 I$
    The loss (negative log likelihood) is then:
    $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
    where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
    difference between estimated and ground truth UV values
    For details, see:
    N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
    Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
    """

    def __init__(self, sigma_lower_bound: 'float'):
        super(IIDIsotropicGaussianUVLoss, self).__init__()
        # Additive floor keeping the predicted variance strictly positive.
        self.sigma_lower_bound = sigma_lower_bound
        self.log2pi = math.log(2 * math.pi)

    def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u:
        'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'):
        # sigma_i^2: softplus keeps it positive, the bound keeps it away from 0.
        sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
        # Squared residual ||delta_i||^2 in UV space.
        du = u - target_u
        dv = v - target_v
        delta_t_delta = du ** 2 + dv ** 2
        # Per-element Gaussian NLL, summed over all elements.
        nll = 0.5 * (self.log2pi + 2 * torch.log(sigma2) +
            delta_t_delta / sigma2)
        return nll.sum()
def get_inputs():
    # Five identically shaped random tensors: u, v, sigma_u, target_u, target_v.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(5)]
def get_init_inputs():
    # No positional args; constructor only needs the variance floor.
    return [[], dict(sigma_lower_bound=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel):
    # Fused IID isotropic Gaussian UV NLL over 256 elements, reduced to one scalar:
    # sum(0.5 * (log(2*pi) + 2*log(sigma2) + ((u-tu)^2 + (v-tv)^2) / sigma2))
    # with sigma2 = softplus(sigma_u) + 4.0 (the module's sigma_lower_bound).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # sigma_u
    tmp13 = tl.load(in_ptr1 + r0, None)  # u
    tmp14 = tl.load(in_ptr2 + r0, None)  # target_u
    tmp17 = tl.load(in_ptr3 + r0, None)  # v
    tmp18 = tl.load(in_ptr4 + r0, None)  # target_v
    # Numerically stable softplus: identity for inputs above 20.
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = 4.0  # sigma_lower_bound baked in by the compiler
    tmp7 = tmp5 + tmp6
    tmp8 = tl_math.log(tmp7)
    tmp9 = 2.0
    tmp10 = tmp8 * tmp9
    tmp11 = 1.8378770664093453  # log(2*pi)
    tmp12 = tmp10 + tmp11
    tmp15 = tmp13 - tmp14
    tmp16 = tmp15 * tmp15
    tmp19 = tmp17 - tmp18
    tmp20 = tmp19 * tmp19
    tmp21 = tmp16 + tmp20
    tmp22 = tmp21 / tmp7
    tmp23 = tmp12 + tmp22
    tmp24 = 0.5
    tmp25 = tmp23 * tmp24
    # Reduce the 256 per-element losses to a single scalar.
    tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
    tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
    """Inductor entry point: IID isotropic Gaussian UV loss over five (4,4,4,4) tensors.

    Args:
        args: [sigma_u, u, target_u, v, target_v] CUDA tensors; cleared in place.

    Returns:
        One-element tuple with the scalar loss buffer.
    """
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-dim output holding the reduced loss.
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1
            , arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, num_warps=2,
            num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
        del arg4_1
    return buf0,
class IIDIsotropicGaussianUVLossNew(nn.Module):
    """
    Triton-backed loss for iid residuals with isotropic covariance:
    $Sigma_i = sigma_i^2 I$
    The loss (negative log likelihood) is:
    $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
    where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
    difference between estimated and ground truth UV values
    For details, see:
    N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
    Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
    """

    def __init__(self, sigma_lower_bound: 'float'):
        super(IIDIsotropicGaussianUVLossNew, self).__init__()
        self.sigma_lower_bound = sigma_lower_bound
        self.log2pi = math.log(2 * math.pi)

    def forward(self, input_0, input_1, input_2, input_3, input_4):
        # Compiled kernel consumes (sigma_u, u, target_u, v, target_v).
        result = call([input_0, input_1, input_2, input_3, input_4])
        return result[0]
|
AbirKhan96/facebook-detectron2
|
IIDIsotropicGaussianUVLoss
| false
| 16,861
|
[
"Apache-2.0"
] | 5
|
6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
LastLevelMaxPool
|
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class LastLevelMaxPool(nn.Module):
    """
    This module is used in the original FPN to generate a downsampled
    P6 feature from P5.
    """

    def __init__(self):
        super().__init__()
        self.num_levels = 1
        self.in_feature = 'p5'

    def forward(self, x):
        # Stride-2 subsampling expressed as a 1x1 max pool; wrapped in a list
        # to match the FPN extra-levels interface.
        pooled = F.max_pool2d(x, kernel_size=1, stride=2, padding=0)
        return [pooled]
def get_inputs():
    # Single random 4x4x4x4 tensor for the module's forward.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # Constructor takes no arguments at all.
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # 1x1 max pool with stride 2 degenerates to gathering every other element
    # along H and W: out[..., i, j] = in[..., 2i, 2j] for a (4,4,4,4) input.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2  # output column
    x1 = xindex // 2  # flattened (batch, channel, output row)
    x2 = xindex
    # Strided gather: input column = 2*x0, input row stride = 8 elements.
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
    """Inductor entry point: stride-2 1x1 max pool of a (4,4,4,4) CUDA tensor.

    Args:
        args: list with the input tensor; cleared in place.

    Returns:
        One-element tuple with the (4,4,2,2) pooled buffer.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
    return buf0,
class LastLevelMaxPoolNew(nn.Module):
    """
    Triton-backed variant: generates a downsampled P6 feature from P5 for the
    original FPN. Note the compiled ``call`` returns a bare tensor, not a list.
    """

    def __init__(self):
        super().__init__()
        self.num_levels = 1
        self.in_feature = 'p5'

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
AbirKhan96/facebook-detectron2
|
LastLevelMaxPool
| false
| 16,862
|
[
"Apache-2.0"
] | 5
|
6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
HardSigmoid
|
import torch
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
def hard_sigmoid(input_, inplace: 'bool'=False):
    """Piecewise-linear sigmoid: clamp((x + 3) / 6, 0, 1)."""
    if not inplace:
        # Out-of-place path leaves the input untouched.
        return F.relu6(input_ + 3.0) / 6.0
    # In-place path mutates and returns the same tensor object.
    return input_.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
class HardSigmoid(nn.Module):
    """Module wrapper around :func:`hard_sigmoid`."""

    def __init__(self, inplace: 'bool'=False):
        super().__init__()
        # Whether forward is allowed to mutate its input tensor.
        self.inplace = inplace

    def forward(self, input_):
        return hard_sigmoid(input_, self.inplace)
def get_inputs():
    # Single random 4x4x4x4 tensor for the module's forward.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # Constructor takes no required arguments.
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Elementwise hard sigmoid over 256 elements:
    # out = clamp(x + 3, 0, 6) * (1/6), i.e. relu6(x + 3) / 6.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    # Multiply by the reciprocal instead of dividing by 6.
    tmp7 = 0.16666666666666666
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
    """Inductor entry point: hard sigmoid of a (4,4,4,4) CUDA tensor.

    Args:
        args: list with the input tensor; cleared in place.

    Returns:
        One-element tuple with the output buffer (same shape as input).
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
def hard_sigmoid(input_, inplace: 'bool'=False):
    """Piecewise-linear sigmoid: clamp((x + 3) / 6, 0, 1)."""
    if not inplace:
        # Out-of-place path leaves the input untouched.
        return F.relu6(input_ + 3.0) / 6.0
    # In-place path mutates and returns the same tensor object.
    return input_.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
class HardSigmoidNew(nn.Module):
    """Triton-backed hard sigmoid module.

    Note: the compiled path is always out-of-place; ``inplace`` is stored for
    interface compatibility but does not change the kernel's behavior.
    """

    def __init__(self, inplace: 'bool'=False):
        super().__init__()
        self.inplace = inplace

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
Adlik/zen_nas
|
HardSigmoid
| false
| 16,863
|
[
"Apache-2.0"
] | 7
|
d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
https://github.com/Adlik/zen_nas/tree/d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
ResizeTransform
|
import torch
import torch.nn as nn
import torch.nn.functional as nnf
import torch.utils
class ResizeTransform(nn.Module):
    """
    Resize a transform, which involves resizing the vector field *and* rescaling it.
    """

    def __init__(self, vel_resize, ndims):
        super().__init__()
        # factor > 1 upsamples, factor < 1 downsamples.
        self.factor = 1.0 / vel_resize
        # Interpolation mode prefix follows spatial rank (2D/3D); any other
        # rank falls back to plain 'linear'.
        prefix = {2: 'bi', 3: 'tri'}.get(ndims, '')
        self.mode = prefix + 'linear'

    def forward(self, x):
        if self.factor == 1:
            return x
        if self.factor < 1:
            # Downsample first, then shrink the displacement magnitudes.
            x = nnf.interpolate(x, align_corners=True, scale_factor=self.
                factor, mode=self.mode)
            return self.factor * x
        # Upsample: grow the magnitudes first, then the grid.
        x = self.factor * x
        return nnf.interpolate(x, align_corners=True, scale_factor=self.
            factor, mode=self.mode)
def get_inputs():
    # Single random 3-D tensor (batch, channel, length).
    shape = [4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # No positional args; constructor needs the resize factor and rank.
    return [[], dict(vel_resize=4, ndims=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused linear interpolation (align_corners=True, output length 1) plus
    # the 0.25 rescale: with a single output sample the lerp weight is 0, so
    # out = 0.25 * in[..., 0] for each of the 16 (batch, channel) rows.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    # Lerp between the first two samples with weight 0 -> first sample.
    tmp2 = tmp1 - tmp0
    tmp3 = 0.0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    # Post-interpolation magnitude rescale (factor = 0.25).
    tmp6 = 0.25
    tmp7 = tmp5 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
    """Inductor entry point: downsample a (4,4,4) field to (4,4,1) and scale by 0.25.

    Args:
        args: list with the input tensor; cleared in place.

    Returns:
        One-element tuple with the (4,4,1) output buffer.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
            (16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
    return buf0,
class ResizeTransformNew(nn.Module):
    """
    Triton-backed resize of a transform: resizes the vector field *and*
    rescales it via the compiled ``call`` entry point.
    """

    def __init__(self, vel_resize, ndims):
        super().__init__()
        self.factor = 1.0 / vel_resize
        # Interpolation mode prefix follows spatial rank (2D/3D).
        prefix = {2: 'bi', 3: 'tri'}.get(ndims, '')
        self.mode = prefix + 'linear'

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
Alison-brie/AutoReg
|
ResizeTransform
| false
| 16,864
|
[
"MIT"
] | 10
|
a23d45a6f7c6e47f61430e1565dda316452a4418
|
https://github.com/Alison-brie/AutoReg/tree/a23d45a6f7c6e47f61430e1565dda316452a4418
|
Conv2d
|
import torch
import torch.utils.data
import torch.nn.functional as F
class Conv2d(torch.nn.Conv2d):
    """
    A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
    """

    def __init__(self, *args, **kwargs):
        """
        Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
        Args:
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function
        It assumes that norm layer is used before activation.
        """
        # Strip the extra kwargs before delegating to the stock Conv2d.
        extra_norm = kwargs.pop('norm', None)
        extra_activation = kwargs.pop('activation', None)
        super().__init__(*args, **kwargs)
        self.norm = extra_norm
        self.activation = extra_activation

    def forward(self, x):
        if not torch.jit.is_scripting():
            # SyncBatchNorm cannot handle zero-element batches while training.
            if x.numel() == 0 and self.training:
                assert not isinstance(self.norm, torch.nn.SyncBatchNorm
                    ), 'SyncBatchNorm does not support empty inputs!'
        out = F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
def get_inputs():
    # Single random 4x4x4x4 tensor for the module's forward.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # No positional args; constructor needs channel counts and kernel size.
    return [[], dict(in_channels=4, out_channels=4, kernel_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after the external convolution: each of the 16
    # (batch, channel) outputs gets its per-channel bias (channel = index % 4).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # output-channel index selecting the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    """Inductor entry point: conv2d via the extern kernel, then a fused bias add.

    Args:
        args: [weight, input, bias] CUDA tensors; cleared in place.

    Returns:
        (conv_output, weight, input) — trailing tensors are kept for autograd.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Bias is applied separately by the Triton kernel below.
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2
class Conv2dNew(torch.nn.Conv2d):
    """
    Triton-backed wrapper around :class:`torch.nn.Conv2d`.

    Accepts the same extra keyword arguments as the eager version, but the
    compiled forward path only runs the convolution + bias; ``norm`` and
    ``activation`` are stored for interface compatibility.
    """

    def __init__(self, *args, **kwargs):
        """
        Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
        Args:
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function
        It assumes that norm layer is used before activation.
        """
        extra_norm = kwargs.pop('norm', None)
        extra_activation = kwargs.pop('activation', None)
        super().__init__(*args, **kwargs)
        self.norm = extra_norm
        self.activation = extra_activation

    def forward(self, input_0):
        # Compiled kernel consumes (weight, input, bias).
        result = call([self.weight, input_0, self.bias])
        return result[0]
|
AbirKhan96/facebook-detectron2
|
Conv2d
| false
| 16,865
|
[
"Apache-2.0"
] | 5
|
6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
Linear_softmax
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear_softmax(nn.Module):
    """A single linear layer followed by a softmax over dim 1."""

    def __init__(self, inp, out):
        super(Linear_softmax, self).__init__()
        self.f1 = nn.Linear(inp, out)

    def forward(self, x):
        logits = self.f1(x)
        # Normalize along dim 1 so rows/channels form a probability simplex.
        return F.softmax(logits, dim=1)
def get_inputs():
    # Single random 4x4x4x4 tensor for the module's forward.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # No positional args; constructor needs input and output widths.
    return [[], dict(inp=4, out=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 1 over dim 1 of a (4,4,4,4) tensor: subtract the per-slice
    # maximum (for numerical stability) and exponentiate. The four candidates
    # for each position sit 16 elements apart within a 64-element batch block.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16  # position within a (4,4) spatial slice
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # The four dim-1 entries sharing this (batch, spatial) position.
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 2: divide each exponentiated value by the sum of the four
    # dim-1 entries sharing its (batch, spatial) position.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16  # position within a (4,4) spatial slice
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
    """Inductor entry point: linear layer (addmm) followed by a two-pass softmax.

    Args:
        args: [weight (4,4), bias (4,), input (4,4,4,4)] CUDA tensors; cleared in place.

    Returns:
        (softmax_output, flattened_input_view, softmax_output) — extras kept for autograd.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # bias + input @ weight.T, with the input flattened to (64, 4).
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Pass 1: subtract per-slice max and exponentiate.
        triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        # Reuse the addmm buffer for the final normalized output.
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        # Pass 2: divide by the per-slice sum.
        triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf1
    return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class Linear_softmaxNew(nn.Module):
    """Triton-backed linear layer + softmax over dim 1."""

    def __init__(self, inp, out):
        super(Linear_softmaxNew, self).__init__()
        self.f1 = nn.Linear(inp, out)

    def forward(self, input_0):
        # Compiled kernel consumes (weight, bias, input).
        result = call([self.f1.weight, self.f1.bias, input_0])
        return result[0]
|
Alfo5123/ConcreteDropout
|
Linear_softmax
| false
| 16,866
|
[
"MIT"
] | 7
|
c442871553e20a2de078c0fbac7fa52302d50abf
|
https://github.com/Alfo5123/ConcreteDropout/tree/c442871553e20a2de078c0fbac7fa52302d50abf
|
IndepAnisotropicGaussianUVLoss
|
import math
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class IndepAnisotropicGaussianUVLoss(nn.Module):
    """
    Loss for the case of independent residuals with anisotropic covariances:
    $Sigma_i = sigma_i^2 I + r_i r_i^T$
    The loss (negative log likelihood) is then:
    $1/2 sum_{i=1}^n (log(2 pi)
      + log sigma_i^2 (sigma_i^2 + ||r_i||^2)
      + ||delta_i||^2 / sigma_i^2
      - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
    where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
    difference between estimated and ground truth UV values
    For details, see:
    N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
    Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
    """

    def __init__(self, sigma_lower_bound: 'float'):
        super(IndepAnisotropicGaussianUVLoss, self).__init__()
        # Additive floor keeping the predicted variance strictly positive.
        self.sigma_lower_bound = sigma_lower_bound
        self.log2pi = math.log(2 * math.pi)

    def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u:
        'torch.Tensor', kappa_u_est: 'torch.Tensor', kappa_v_est:
        'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'):
        # sigma_i^2, kept strictly positive via softplus plus the lower bound.
        sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
        # ||r_i||^2 for the anisotropy direction r_i = (kappa_u, kappa_v).
        r_sqnorm2 = kappa_u_est ** 2 + kappa_v_est ** 2
        delta_u = u - target_u
        delta_v = v - target_v
        # ||delta_i||^2 and <delta_i, r_i>^2.
        delta_sqnorm = delta_u ** 2 + delta_v ** 2
        delta_r = delta_u * kappa_u_est + delta_v * kappa_v_est
        delta_r_sqnorm = delta_r ** 2
        # det(Sigma_i) = sigma_i^2 * (sigma_i^2 + ||r_i||^2).
        denom2 = sigma2 * (sigma2 + r_sqnorm2)
        nll = 0.5 * (self.log2pi + torch.log(denom2) + delta_sqnorm /
            sigma2 - delta_r_sqnorm / denom2)
        return nll.sum()
def get_inputs():
    # Seven identically shaped random tensors:
    # u, v, sigma_u, kappa_u_est, kappa_v_est, target_u, target_v.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(7)]
def get_init_inputs():
    # No positional args; constructor only needs the variance floor.
    return [[], dict(sigma_lower_bound=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel,
    rnumel):
    # Fused anisotropic Gaussian UV NLL over 256 elements, reduced to a scalar:
    # 0.5 * (log(2*pi) + log(sigma2*(sigma2+||r||^2)) + ||delta||^2/sigma2
    #        - <delta, r>^2 / (sigma2*(sigma2+||r||^2)))
    # with sigma2 = softplus(sigma_u) + 4.0 (the module's sigma_lower_bound).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # sigma_u
    tmp8 = tl.load(in_ptr1 + r0, None)  # kappa_u_est
    tmp10 = tl.load(in_ptr2 + r0, None)  # kappa_v_est
    tmp18 = tl.load(in_ptr3 + r0, None)  # u
    tmp19 = tl.load(in_ptr4 + r0, None)  # target_u
    tmp22 = tl.load(in_ptr5 + r0, None)  # v
    tmp23 = tl.load(in_ptr6 + r0, None)  # target_v
    # Numerically stable softplus: identity for inputs above 20.
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = 4.0  # sigma_lower_bound baked in by the compiler
    tmp7 = tmp5 + tmp6
    # ||r||^2 = kappa_u^2 + kappa_v^2; denom2 = sigma2 * (sigma2 + ||r||^2).
    tmp9 = tmp8 * tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = tmp7 + tmp12
    tmp14 = tmp7 * tmp13
    tmp15 = tl_math.log(tmp14)
    tmp16 = 1.8378770664093453  # log(2*pi)
    tmp17 = tmp15 + tmp16
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 * tmp20
    tmp24 = tmp22 - tmp23
    tmp25 = tmp24 * tmp24
    tmp26 = tmp21 + tmp25
    tmp27 = tmp26 / tmp7
    tmp28 = tmp17 + tmp27
    # <delta, r>^2 / denom2 correction term.
    tmp29 = tmp20 * tmp8
    tmp30 = tmp24 * tmp10
    tmp31 = tmp29 + tmp30
    tmp32 = tmp31 * tmp31
    tmp33 = tmp32 / tmp14
    tmp34 = tmp28 - tmp33
    tmp35 = 0.5
    tmp36 = tmp34 * tmp35
    # Reduce the 256 per-element losses to a single scalar.
    tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
    tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
    tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
    # TorchInductor entry point for IndepAnisotropicGaussianUVLoss.
    # args: seven float32 CUDA tensors of shape (4, 4, 4, 4).
    # Returns a 1-tuple holding the 0-dim scalar loss.
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
    args.clear()  # drop caller references so inputs can be freed early
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-dim output buffer receiving the reduced loss.
        buf1 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        # Single program reduces all 4*4*4*4 = 256 elements.
        triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1
            , arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256,
            num_warps=2, num_stages=1)
    del arg0_1
    del arg1_1
    del arg2_1
    del arg3_1
    del arg4_1
    del arg5_1
    del arg6_1
    return buf1,
class IndepAnisotropicGaussianUVLossNew(nn.Module):
    """
    Loss for the case of independent residuals with anisotropic covariances:
    $Sigma_i = sigma_i^2 I + r_i r_i^T$
    The loss (negative log likelihood) is then:
    $1/2 sum_{i=1}^n (log(2 pi)
      + log sigma_i^2 (sigma_i^2 + ||r_i||^2)
      + ||delta_i||^2 / sigma_i^2
      - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
    where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
    difference between estimated and ground truth UV values
    For details, see:
    N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
    Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019

    This variant evaluates the fused Triton kernel via ``call``.
    """

    def __init__(self, sigma_lower_bound: 'float'):
        super().__init__()
        # NOTE(review): the compiled kernel hard-codes sigma_lower_bound=4.0
        # and log(2*pi); these attributes are kept for interface parity.
        self.sigma_lower_bound = sigma_lower_bound
        self.log2pi = math.log(2 * math.pi)

    def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
        input_6):
        """Return the scalar loss for seven (4, 4, 4, 4) CUDA tensors."""
        tensors = [input_0, input_1, input_2, input_3, input_4, input_5,
            input_6]
        result, = call(tensors)
        return result
|
AbirKhan96/facebook-detectron2
|
IndepAnisotropicGaussianUVLoss
| false
| 16,867
|
[
"Apache-2.0"
] | 5
|
6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
|
TrueDynamics
|
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
class TrueDynamics(nn.Module):
    """Analytic pendulum dynamics: maps columns (th, thdot, u) to the next
    (th, thdot) state using the gym-style update with dt = 0.08."""

    def __init__(self, env, hidden_size=200, drop_prob=0.0):
        super().__init__()
        self.env = env
        self.hidden_size = hidden_size
        self.drop_prob = drop_prob
        self.mask1 = None

    def forward(self, x):
        """x[:, 0] = angle, x[:, 1] = angular velocity, x[:, 2] = torque."""
        theta = x[:, 0]
        theta_dot = x[:, 1]
        torque = torch.clamp(x[:, 2], -3, 3)
        g = 9.82
        m = 1.0
        l = 1.0
        dt = 0.08
        accel = -3 * g / (2 * l) * torch.sin(theta + np.pi) + 3.0 / (m * l **
            2) * torque
        next_theta_dot = theta_dot + accel * dt
        # Angle update uses the *unclamped* velocity, as in the original.
        next_theta = theta + next_theta_dot * dt
        next_theta_dot = torch.clamp(next_theta_dot, -8, 8)
        return torch.stack([next_theta, next_theta_dot], 1)

    def set_sampling(self, sampling=None, batch_size=None):
        """Sample (and rescale) two Bernoulli dropout masks when enabled."""
        if sampling is None:
            raise ValueError('Sampling cannot be None.')
        self.sampling = sampling
        if self.sampling:
            keep = 1 - self.drop_prob
            self.mask1 = Variable(torch.bernoulli(torch.zeros(batch_size,
                self.hidden_size).fill_(keep)))
            self.mask2 = Variable(torch.bernoulli(torch.zeros(batch_size,
                self.hidden_size).fill_(keep)))
            self.mask1 /= keep
            self.mask2 /= keep
def get_inputs():
    """Random forward-input batch for smoke-testing TrueDynamics."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for TrueDynamics."""
    return [[], {'env': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused pendulum step + torch.stack for TrueDynamics.forward.
    # dim-1 rows of the (4, 4, 4, 4) input act as th / thdot / u; the
    # (4, 8, 4) output holds newth in rows 0-3 and the clamped newthdot in
    # rows 4-7 (later reinterpreted as (4, 2, 4, 4)).
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 8  # stacked row: 0-3 -> newth, 4-7 -> newthdot
    x0 = xindex % 4
    x2 = xindex // 32
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # first half of the stack: newth branch
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
        other=0.0)
    tmp7 = 3.141592653589793
    tmp8 = tmp5 + tmp7
    tmp9 = tl_math.sin(tmp8)
    tmp10 = -14.73  # -3 * g / (2 * l) with g = 9.82, l = 1
    tmp11 = tmp9 * tmp10
    tmp12 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
        other=0.0)
    tmp13 = -3.0
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = 3.0
    tmp16 = triton_helpers.minimum(tmp14, tmp15)  # clamp(u, -3, 3)
    tmp17 = tmp16 * tmp15  # 3 / (m * l**2) * u
    tmp18 = tmp11 + tmp17
    tmp19 = 0.08  # dt
    tmp20 = tmp18 * tmp19
    tmp21 = tmp6 + tmp20  # newthdot (not yet clamped)
    tmp22 = tmp21 * tmp19
    tmp23 = tmp5 + tmp22  # newth = th + newthdot * dt
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp4, tmp23, tmp24)
    tmp26 = tmp0 >= tmp3  # second half: recompute newthdot and clamp it
    tl.full([1], 8, tl.int64)
    tmp29 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 &
        xmask, other=0.0)
    tmp30 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask,
        other=0.0)
    tmp31 = tmp30 + tmp7
    tmp32 = tl_math.sin(tmp31)
    tmp33 = tmp32 * tmp10
    tmp34 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 &
        xmask, other=0.0)
    tmp35 = triton_helpers.maximum(tmp34, tmp13)
    tmp36 = triton_helpers.minimum(tmp35, tmp15)
    tmp37 = tmp36 * tmp15
    tmp38 = tmp33 + tmp37
    tmp39 = tmp38 * tmp19
    tmp40 = tmp29 + tmp39
    tmp41 = -8.0
    tmp42 = triton_helpers.maximum(tmp40, tmp41)
    tmp43 = 8.0
    tmp44 = triton_helpers.minimum(tmp42, tmp43)  # clamp(newthdot, -8, 8)
    tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
    tmp46 = tl.where(tmp26, tmp44, tmp45)
    tmp47 = tl.where(tmp4, tmp25, tmp46)  # select branch per output row
    tl.store(out_ptr0 + x3, tmp47, xmask)
def call(args):
    # Compiled forward for TrueDynamics: one fused kernel computes the
    # pendulum update and the stack of [newth, newthdot].
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # (4, 8, 4) staging buffer; viewed below as (4, 2, 4, 4).
        buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_stack_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
    del arg0_1
    return reinterpret_tensor(buf0, (4, 2, 4, 4), (32, 16, 4, 1), 0),
class TrueDynamicsNew(nn.Module):
    """Pendulum-dynamics module whose forward runs the compiled ``call``."""

    def __init__(self, env, hidden_size=200, drop_prob=0.0):
        super().__init__()
        self.env = env
        self.hidden_size = hidden_size
        self.drop_prob = drop_prob
        self.mask1 = None

    def set_sampling(self, sampling=None, batch_size=None):
        """Sample (and rescale) two Bernoulli dropout masks when enabled."""
        if sampling is None:
            raise ValueError('Sampling cannot be None.')
        self.sampling = sampling
        if self.sampling:
            keep = 1 - self.drop_prob
            self.mask1 = Variable(torch.bernoulli(torch.zeros(batch_size,
                self.hidden_size).fill_(keep)))
            self.mask2 = Variable(torch.bernoulli(torch.zeros(batch_size,
                self.hidden_size).fill_(keep)))
            self.mask1 /= keep
            self.mask2 /= keep

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
Alfo5123/ConcreteDropout
|
TrueDynamics
| false
| 16,868
|
[
"MIT"
] | 7
|
c442871553e20a2de078c0fbac7fa52302d50abf
|
https://github.com/Alfo5123/ConcreteDropout/tree/c442871553e20a2de078c0fbac7fa52302d50abf
|
EqualConv2d
|
import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
    """Attach the EqualLR reparametrization to *module* and return it.

    Afterwards ``module.<name>`` is recomputed from ``<name>_orig`` on
    every forward pass.
    """
    EqualLR.apply(module, name)
    return module
class EqualLR:
    """Forward-pre-hook implementing equalized learning rate: the live
    parameter ``<name>`` is recomputed each call as ``<name>_orig`` scaled
    by sqrt(2 / fan_in)."""

    def __init__(self, name):
        self.name = name

    def compute_weight(self, module):
        # fan_in = in_channels * per-filter receptive-field size.
        orig = getattr(module, self.name + '_orig')
        fan_in = orig.data.size(1) * orig.data[0][0].numel()
        return orig * sqrt(2 / fan_in)

    @staticmethod
    def apply(module, name):
        hook = EqualLR(name)
        param = getattr(module, name)
        # Replace the trainable parameter with an "_orig" copy; the scaled
        # version is materialized lazily by __call__.
        del module._parameters[name]
        module.register_parameter(name + '_orig', nn.Parameter(param.data))
        module.register_forward_pre_hook(hook)
        return hook

    def __call__(self, module, input):
        setattr(module, self.name, self.compute_weight(module))
class EqualConv2d(nn.Module):
    """Conv2d with normal-init weight, zero bias and equalized-LR scaling
    applied at call time via ``equal_lr``."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        layer = nn.Conv2d(*args, **kwargs)
        layer.weight.data.normal_()
        layer.bias.data.zero_()
        self.conv = equal_lr(layer)

    def forward(self, input):
        return self.conv(input)
def get_inputs():
    """Random forward-input batch for EqualConv2d smoke tests."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for EqualConv2d."""
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Equalized-LR weight scaling: out = weight_orig * sqrt(2 / fan_in).
    # Here fan_in = 4 in_channels * 4*4 kernel = 64, so the constant is
    # sqrt(2 / 64) = 0.1767766952966369.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.1767766952966369
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after the convolution: 16 = 4 batch * 4 channel
    # elements (1x1 spatial output); x0 selects the channel bias.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    # Compiled EqualConv2d forward: scale weight_orig, run the cuDNN
    # convolution, then add the bias in place. The extra returned tensors
    # (scaled weight, input) are saved for the backward pass.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return buf2, buf0, primals_3, buf0
def equal_lr(module, name='weight'):
    """Attach the EqualLR reparametrization to *module* and return it.

    Afterwards ``module.<name>`` is recomputed from ``<name>_orig`` on
    every forward pass.
    """
    EqualLR.apply(module, name)
    return module
class EqualLR:
    """Forward-pre-hook implementing equalized learning rate: the live
    parameter ``<name>`` is recomputed each call as ``<name>_orig`` scaled
    by sqrt(2 / fan_in)."""

    def __init__(self, name):
        self.name = name

    def compute_weight(self, module):
        # fan_in = in_channels * per-filter receptive-field size.
        orig = getattr(module, self.name + '_orig')
        fan_in = orig.data.size(1) * orig.data[0][0].numel()
        return orig * sqrt(2 / fan_in)

    @staticmethod
    def apply(module, name):
        hook = EqualLR(name)
        param = getattr(module, name)
        # Replace the trainable parameter with an "_orig" copy; the scaled
        # version is materialized lazily by __call__.
        del module._parameters[name]
        module.register_parameter(name + '_orig', nn.Parameter(param.data))
        module.register_forward_pre_hook(hook)
        return hook

    def __call__(self, module, input):
        setattr(module, self.name, self.compute_weight(module))
class EqualConv2dNew(nn.Module):
    """Equal-LR Conv2d whose forward runs the compiled ``call`` path."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        layer = nn.Conv2d(*args, **kwargs)
        layer.weight.data.normal_()
        layer.bias.data.zero_()
        self.conv = equal_lr(layer)

    def forward(self, input_0):
        # call expects (weight_orig, bias, input) in that order.
        outputs = call([self.conv.weight_orig, self.conv.bias, input_0])
        return outputs[0]
|
AaltoVision/balanced-pioneer
|
EqualConv2d
| false
| 16,869
|
[
"MIT"
] | 5
|
51f58080fd2db3159de3e1ccb47f38e03220faf0
|
https://github.com/AaltoVision/balanced-pioneer/tree/51f58080fd2db3159de3e1ccb47f38e03220faf0
|
PixelNorm
|
import torch
from torch import nn
class PixelNorm(nn.Module):
    """Normalize each pixel's channel vector by its RMS over dim 1
    (plus a small epsilon for stability)."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        mean_sq = torch.mean(input ** 2, dim=1, keepdim=True)
        return input / torch.sqrt(mean_sq + 1e-08)
def get_inputs():
    """Random forward-input batch for PixelNorm smoke tests."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for PixelNorm (takes no arguments)."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # PixelNorm: out = x / sqrt(mean over the 4 channels of x^2 + 1e-8).
    # x0 (spatial) and x2 (batch) locate the pixel; the four strided loads
    # below walk its channel values.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10  # sum of squared channel values
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12  # mean over the 4 channels
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.sqrt(tmp15)
    tmp17 = tmp0 / tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
    # Compiled PixelNorm forward: one elementwise kernel over 256 values.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_pow_sqrt_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
    del arg0_1
    return buf0,
class PixelNormNew(nn.Module):
    """PixelNorm whose forward runs the compiled ``call`` kernel path."""

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
AaltoVision/balanced-pioneer
|
PixelNorm
| false
| 16,870
|
[
"MIT"
] | 5
|
51f58080fd2db3159de3e1ccb47f38e03220faf0
|
https://github.com/AaltoVision/balanced-pioneer/tree/51f58080fd2db3159de3e1ccb47f38e03220faf0
|
EmbeddingModule
|
import torch
import torch.nn as nn
class EmbeddingModule(nn.Module):
    """Global-average-pool a feature map and project it to a descriptor."""

    def __init__(self, in_channels, desc_channels):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_channels, desc_channels)

    def forward(self, x):
        """(N, C, H, W) -> (N, desc_channels)."""
        pooled = self.pool(x)
        flat = torch.flatten(pooled, 1)
        return self.fc(flat)
def get_inputs():
    """Random forward-input batch for EmbeddingModule smoke tests."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for EmbeddingModule."""
    return [[], {'in_channels': 4, 'desc_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Global average pool: for each of the 16 (batch, channel) pairs
    # (xindex), average the 16 spatial elements (rindex) and store the
    # result in place.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0  # H * W = 4 * 4 elements per (batch, channel)
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
    # Compiled EmbeddingModule forward: Triton global-average-pool, then a
    # cuBLAS addmm for the linear head. Returns (embedding, pooled_flat);
    # the pooled tensor is kept for the backward pass.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # out = bias + pooled @ weight.T
        extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4,
            1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
            =1, beta=1, out=buf2)
        del primals_2
        del primals_3
    return buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
class EmbeddingModuleNew(nn.Module):
    """Global-average-pool + linear embedding head (compiled forward)."""

    def __init__(self, in_channels, desc_channels):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_channels, desc_channels)

    def forward(self, input_0):
        # call expects (input, fc.weight, fc.bias) in that order.
        outputs = call([input_0, self.fc.weight, self.fc.bias])
        return outputs[0]
|
ActiveVisionLab/LaLaLoc
|
EmbeddingModule
| false
| 16,872
|
[
"MIT"
] | 5
|
21a0da94fbe7ef6cce3d34c6a5f47cc09d072f45
|
https://github.com/ActiveVisionLab/LaLaLoc/tree/21a0da94fbe7ef6cce3d34c6a5f47cc09d072f45
|
DiceLoss
|
import torch
import torch.nn as nn
def flatten_channels(inputs, targets, channel_dim):
    """
    Helper function to flatten inputs and targets for each channel
    E.g., (1, 3, 10, 256, 256) --> (3, 655360)
    Parameters
    ----------
    inputs: torch.Tensor
        U-net output
    targets: torch.Tensor
        Target labels
    channel_dim: int
        Which dim represents output channels?
    """
    # Move the channel axis first, keeping every other axis in order.
    order = [channel_dim] + [axis for axis in range(len(inputs.shape)) if
        axis != channel_dim]
    flat_inputs = torch.flatten(inputs.permute(*order), start_dim=1)
    flat_targets = torch.flatten(targets.permute(*order), start_dim=1)
    return flat_inputs, flat_targets
class DiceLoss(nn.Module):
    """
    DiceLoss: 1 - DICE coefficient
    Adaptations: weights output channels equally in final loss.
    This is necessary for anisotropic data.
    """

    def __init__(self, weight=None, size_average=True):
        super().__init__()

    def forward(self, inputs, targets, channel_dim=1, smooth=1):
        """
        inputs: torch.tensor
            Network predictions. Float
        targets: torch.tensor
            Ground truth labels. Float
        channel_dim: int
            Dimension in which output channels can be found.
            Loss is weighted equally between output channels.
        smooth: int
            Smoothing hyperparameter.
        """
        inputs, targets = flatten_channels(inputs, targets, channel_dim)
        intersection = (inputs * targets).sum(-1)
        denominator = inputs.sum(-1) + targets.sum(-1) + smooth
        dice = (2.0 * intersection + smooth) / denominator
        return (1 - dice).mean()
def get_inputs():
    """Random (predictions, targets) pair for DiceLoss smoke tests."""
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for DiceLoss (defaults only)."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Dice-loss partial reductions, one row per channel (xnumel = 4):
    #   out_ptr0[c] = sum(inputs * targets)  (intersection)
    #   out_ptr1[c] = sum(inputs)
    #   out_ptr2[c] = sum(targets)
    # each over 64 = 4 batch * 16 spatial elements; the index expression
    # realizes the channel-first permutation done by flatten_channels.
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
        other=0.0)
    tmp1 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
        other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr1 + x0, tmp10, xmask)
    tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Dice combine: dice_c = (2*I_c + 1) / (A_c + B_c + 1) with smooth = 1
    # baked in, then loss = mean over the 4 channels of (1 - dice_c).
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # intersection per channel
    tmp5 = tl.load(in_ptr1 + r0, None)  # sum(inputs) per channel
    tmp6 = tl.load(in_ptr2 + r0, None)  # sum(targets) per channel
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp3 = 1.0  # smooth
    tmp4 = tmp2 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp7 + tmp3
    tmp9 = tmp4 / tmp8
    tmp10 = tmp3 - tmp9  # 1 - dice
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]
    tmp14 = 4.0
    tmp15 = tmp13 / tmp14  # mean over the 4 channels
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
    # Compiled DiceLoss forward: kernel 0 produces per-channel partial
    # sums, kernel 1 combines them into the scalar mean loss.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
            buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1,
            buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
    return buf4,
def flatten_channels(inputs, targets, channel_dim):
    """
    Helper function to flatten inputs and targets for each channel
    E.g., (1, 3, 10, 256, 256) --> (3, 655360)
    Parameters
    ----------
    inputs: torch.Tensor
        U-net output
    targets: torch.Tensor
        Target labels
    channel_dim: int
        Which dim represents output channels?
    """
    # Move the channel axis first, keeping every other axis in order.
    order = [channel_dim] + [axis for axis in range(len(inputs.shape)) if
        axis != channel_dim]
    flat_inputs = torch.flatten(inputs.permute(*order), start_dim=1)
    flat_targets = torch.flatten(targets.permute(*order), start_dim=1)
    return flat_inputs, flat_targets
class DiceLossNew(nn.Module):
    """
    DiceLoss: 1 - DICE coefficient
    Adaptations: weights output channels equally in final loss.
    This is necessary for anisotropic data.

    Compiled variant: the Triton kernels bake in channel_dim=1 and
    smooth=1.
    """

    def __init__(self, weight=None, size_average=True):
        super().__init__()

    def forward(self, input_0, input_1):
        result, = call([input_0, input_1])
        return result
|
AbigailMcGovern/iterseg
|
DiceLoss
| false
| 16,873
|
[
"BSD-3-Clause"
] | 4
|
d23af04c52c8d1711a474a58060abea664a82637
|
https://github.com/AbigailMcGovern/iterseg/tree/d23af04c52c8d1711a474a58060abea664a82637
|
QNetwork
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class QNetwork(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, fc1_units=64,
        fc2_units=64):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        self.action_size = action_size
        super().__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, state):
        """Map a batch of states to per-action Q-values."""
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def get_inputs():
    """Random forward-input batch for QNetwork smoke tests."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for QNetwork."""
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU applied in place; also stores the
    # (activation <= 0) boolean mask consumed by the ReLU backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64  # bias/feature index (64 hidden units)
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
    # Compiled QNetwork forward: fc1 -> ReLU -> fc2 -> ReLU -> fc3 on a
    # (4,4,4,4) input flattened to (64, 4). The boolean buffers hold the
    # ReLU masks saved for the backward pass.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (64, 4), (4, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 64), (64, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (4, 64), (64, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        # fc1 matmul (bias is fused into the ReLU kernel below).
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
            )
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
            primals_2, buf6, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        # fc2 matmul, then fused bias + ReLU again.
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
            reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf2
        buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
            )
        triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
            primals_5, buf5, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # fc3 with bias via addmm.
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
            (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
        buf3, (64, 64), (64, 1), 0), primals_6, buf5, primals_4, buf6
class QNetworkNew(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, fc1_units=64,
        fc2_units=64):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        self.action_size = action_size
        super().__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, input_0):
        """Run the compiled fc1 -> ReLU -> fc2 -> ReLU -> fc3 pipeline."""
        params = [self.fc1.weight, self.fc1.bias, input_0,
            self.fc2.weight, self.fc2.bias,
            self.fc3.weight, self.fc3.bias]
        return call(params)[0]
|
AlexS28/SABER
|
QNetwork
| false
| 16,874
|
[
"BSD-3-Clause"
] | 4
|
91f74319a41f473b8e8f9eff6b7d9b604b94c7da
|
https://github.com/AlexS28/SABER/tree/91f74319a41f473b8e8f9eff6b7d9b604b94c7da
|
Capsule_conv
|
import torch
import torch.nn as nn
def Squash(x):
    """Capsule squashing: rescale each last-dim vector v so its length
    becomes ||v||^2 / (1 + ||v||^2) while keeping its direction."""
    norm = x.norm(dim=-1, keepdim=True)
    scale = norm.pow(2) / (1 + norm.pow(2))
    return x / norm * scale
class Capsule_conv(nn.Module):
    """Primary capsule layer: 9x9 stride-2 convolution, reshape into
    cap_dim-sized vectors, then squash."""

    def __init__(self, in_channels, out_channels, cap_dim):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.cap_dim = cap_dim
        self.kernel_size = 9
        self.stride = 2
        self.conv = nn.Conv2d(in_channels=self.in_channels, out_channels=
            self.out_channels * self.cap_dim, kernel_size=self.kernel_size,
            stride=self.stride)

    def forward(self, x):
        """
        :param x: shape = 256 x 20 x 20. Output of convolution operation
        :return: output of primary capsules
        """
        features = self.conv(x)
        capsules = features.view(features.shape[0], -1, self.cap_dim)
        return Squash(capsules)
def get_inputs():
    """Random forward-input batch for Capsule_conv smoke tests."""
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    """Constructor (args, kwargs) for Capsule_conv."""
    return [[], {'in_channels': 4, 'out_channels': 4, 'cap_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after the capsule convolution: x1 indexes the 16
    # output channels, each with 784 = 28*28 spatial positions.
    xnumel = 50176
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Capsule squash: for each 4-element capsule vector v (x1 selects the
    # capsule), out = (v / ||v||) * (||v||^2 / (1 + ||v||^2)).
    xnumel = 50176
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10  # ||v||^2
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = tmp0 / tmp12  # unit-vector component
    tmp14 = tmp12 * tmp12
    tmp15 = 1.0
    tmp16 = tmp14 + tmp15
    tmp17 = tmp14 / tmp16  # squash factor
    tmp18 = tmp13 * tmp17
    tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
    # Compiled Capsule_conv forward: cuDNN conv (9x9, stride 2), in-place
    # bias add, then the squash kernel writing (4, 3136, 4) capsules.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (16, 4, 9, 9), (324, 81, 9, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(50176)](buf1, primals_2, 50176,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 3136, 4), (12544, 4, 1), torch.float32)
        triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1[grid(50176)](buf1
            , buf2, 50176, XBLOCK=256, num_warps=4, num_stages=1)
    return buf2, primals_1, primals_3, buf1
def Squash(x):
    """Capsule squashing: rescale each last-dim vector v so its length
    becomes ||v||^2 / (1 + ||v||^2) while keeping its direction."""
    norm = x.norm(dim=-1, keepdim=True)
    scale = norm.pow(2) / (1 + norm.pow(2))
    return x / norm * scale
class Capsule_convNew(nn.Module):
    """Primary capsule layer whose forward runs the compiled ``call``
    path (conv + bias + squash)."""

    def __init__(self, in_channels, out_channels, cap_dim):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.cap_dim = cap_dim
        self.kernel_size = 9
        self.stride = 2
        self.conv = nn.Conv2d(in_channels=self.in_channels, out_channels=
            self.out_channels * self.cap_dim, kernel_size=self.kernel_size,
            stride=self.stride)

    def forward(self, input_0):
        # call expects (conv.weight, conv.bias, input) in that order.
        outputs = call([self.conv.weight, self.conv.bias, input_0])
        return outputs[0]
|
AahanSingh/Capsule-Networks
|
Capsule_conv
| false
| 16,875
|
[
"MIT"
] | 5
|
798014b6ff5fe27abdc64d3af364fb7681f292fc
|
https://github.com/AahanSingh/Capsule-Networks/tree/798014b6ff5fe27abdc64d3af364fb7681f292fc
|
DuelingMLP
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DuelingMLP(nn.Module):
    """Dueling Q-network head.

    A shared linear layer feeds separate state-value and advantage streams,
    recombined as Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
    """

    def __init__(self, state_size, num_actions):
        super().__init__()
        self.linear = nn.Linear(state_size, 256)
        self.value_head = nn.Linear(256, 1)
        self.advantage_head = nn.Linear(256, num_actions)

    def forward(self, x):
        # Promote a single state vector to a batch of one.
        if len(x.size()) == 1:
            x = x.unsqueeze(0)
        hidden = F.relu(self.linear(x))
        value = self.value_head(hidden)
        advantage = self.advantage_head(hidden)
        centered = advantage - advantage.mean(dim=1, keepdim=True)
        return (value + centered).squeeze()
def get_inputs():
    """Sample forward() input used for tracing/benchmarking."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Positional args and keyword args used to construct the module."""
    kwargs = {'state_size': 4, 'num_actions': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU for the 256-unit hidden layer.
    # in_out_ptr0: matmul result, overwritten in place with relu(x + bias);
    # out_ptr0: boolean (activation <= 0) mask kept for the backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256  # one bias element per hidden unit
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_add_mean_squeeze_sub_1(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Dueling combine: out = (value + value_bias) + (adv - mean(adv)).
    # in_ptr0: value-head matmul output; in_ptr1: scalar value bias;
    # in_ptr2: advantage tensor; the mean runs over the 4 actions.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex // 4
    x5 = xindex
    x3 = xindex // 64
    x6 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + x5, xmask)
    # Four strided loads gather the advantage values being averaged.
    tmp5 = tl.load(in_ptr2 + (x6 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr2 + (16 + x6 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp8 = tl.load(in_ptr2 + (32 + x6 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr2 + (48 + x6 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp0 + tmp2
    tmp7 = tmp5 + tmp6
    tmp9 = tmp7 + tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = tmp4 - tmp13
    tmp15 = tmp3 + tmp14
    tl.store(out_ptr0 + x5, tmp15, xmask)


def call(args):
    # Compiled DuelingMLP forward.
    # args: (input, linear.w, linear.b, value.w, value.b, adv.w, adv.b).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (256, 4), (4, 1))
    assert_size_stride(primals_3, (256,), (1,))
    assert_size_stride(primals_4, (1, 256), (256, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 256), (256, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Shared trunk: flatten input to (64, 4), apply the first linear.
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 256), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
            )
        del buf0
        buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
            primals_3, buf5, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        # Value head; its bias is deferred to the combine kernel below.
        buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
            reinterpret_tensor(primals_4, (256, 1), (1, 256), 0), out=buf2)
        # Advantage head with fused bias (addmm).
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 256),
            (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
            0), alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mean_squeeze_sub_1[grid(256)](buf2, primals_5,
            buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf2
        del buf3
        del primals_5
    return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
        ), primals_6, primals_4, buf5
class DuelingMLPNew(nn.Module):
    """Triton-compiled variant of the dueling MLP.

    Parameters match the eager version; forward() hands them to the fused
    `call` graph instead of running the layers individually.
    """

    def __init__(self, state_size, num_actions):
        super().__init__()
        self.linear = nn.Linear(state_size, 256)
        self.value_head = nn.Linear(256, 1)
        self.advantage_head = nn.Linear(256, num_actions)

    def forward(self, input_0):
        # The compiled graph expects (input, lin.w, lin.b, v.w, v.b, a.w, a.b).
        params = [input_0, self.linear.weight, self.linear.bias,
            self.value_head.weight, self.value_head.bias,
            self.advantage_head.weight, self.advantage_head.bias]
        return call(params)[0]
|
AlexHermansson/hindsight-experience-replay
|
DuelingMLP
| false
| 16,876
|
[
"MIT"
] | 5
|
65468d523bb803123d7aab9bb83abc7a3d5db3c8
|
https://github.com/AlexHermansson/hindsight-experience-replay/tree/65468d523bb803123d7aab9bb83abc7a3d5db3c8
|
NMFNet
|
import torch
import torch.nn as nn
from numpy.random import uniform
from numpy.linalg import pinv
class NMFNet(nn.Module):
    """NMF implemented as a neural network"""

    def __init__(self, X_height, k):
        """Params
        X_height: number of rows of the data matrix X
        k: number of latent components
        """
        super(NMFNet, self).__init__()
        self.k = k
        # Random column-stochastic initialisation of the basis matrix W.
        W_numpy = uniform(0, 1, (X_height, k))
        W_numpy = W_numpy / W_numpy.sum(0)[None, :]
        self.W = nn.Parameter(torch.FloatTensor(W_numpy))
        # The pseudo-inverse of W serves as the initial encoder X -> H.
        self.W_inv = nn.Parameter(torch.FloatTensor(pinv(W_numpy)))
        self.relu = nn.ReLU()
        self.sm = nn.Softmax(dim=0)

    def forward(self, X):
        """Reconstruct X as softmax(W) @ relu(W_inv @ X)."""
        return self.get_W() @ self.get_H(X)

    def get_H(self, X):
        return self.relu(self.W_inv @ X)

    def get_W(self):
        return self.sm(self.W)
def get_inputs():
    """Sample forward() input used for tracing/benchmarking."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Positional args and keyword args used to construct the module."""
    kwargs = {'X_height': 4, 'k': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from numpy.random import uniform
from numpy.linalg import pinv
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Layout-transposing copy: materialises a permuted view of the (4,4,4,4)
    # tensor so the following mm can read it as a contiguous (64, 4) matrix.
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax over dim 0 of the (4, 4) matrix W, pass 1: exp(x - col_max).
    # The column maximum is subtracted for numerical stability.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # The four loads below walk one column (stride 4) to find its max.
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 2: divide each exp() value by its column sum.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise ReLU copy: out = max(0, in) (this is get_H's relu).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_clone_relu_threshold_backward_4(in_ptr0, out_ptr0,
    ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Transposed copy of the (relu(x) <= 0) mask, saved for autograd.
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.full([1, 1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)


def call(args):
    # Compiled NMFNet forward: softmax(W) @ relu(W_inv @ X).
    # args: (W (4,4), X (4,4,4,4), W_inv (4,4)).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Transposed copy of X so W_inv @ X becomes a plain (64,4) x (4,4) mm.
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](primals_2, buf0, 64, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1)
        del primals_1
        # Two-pass numerically-stable softmax over W's columns.
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(16)](primals_3, buf2, 16, XBLOCK=
            16, num_warps=1, num_stages=1)
        del primals_3
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf2
        # H = relu(W_inv @ X).
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clone_3[grid(256)](buf1, buf4, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        # X_hat = softmax(W) @ H.
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf3, (4, 4), (1, 4), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clone_0[grid(64, 4)](buf5, buf6, 64, 4, XBLOCK=4,
            YBLOCK=32, num_warps=4, num_stages=1)
        del buf5
        # ReLU mask saved for the backward pass.
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clone_relu_threshold_backward_4[grid(64, 4)](buf1,
            buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del buf1
    return buf6, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
        ), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf7
class NMFNetNew(nn.Module):
    """NMF implemented as a neural network"""

    def __init__(self, X_height, k):
        """Params
        X_height: number of rows of the data matrix X
        k: number of latent components
        """
        super(NMFNetNew, self).__init__()
        self.k = k
        # Random column-stochastic initialisation of the basis matrix W.
        W_numpy = uniform(0, 1, (X_height, k))
        W_numpy = W_numpy / W_numpy.sum(0)[None, :]
        self.W = nn.Parameter(torch.FloatTensor(W_numpy))
        self.W_inv = nn.Parameter(torch.FloatTensor(pinv(W_numpy)))
        self.relu = nn.ReLU()
        self.sm = nn.Softmax(dim=0)

    def get_H(self, X):
        return self.relu(self.W_inv @ X)

    def get_W(self):
        return self.sm(self.W)

    def forward(self, input_0):
        # The compiled graph expects the argument order (W, X, W_inv).
        return call([self.W, input_0, self.W_inv])[0]
|
Aaron09/torchfactor
|
NMFNet
| false
| 16,877
|
[
"MIT"
] | 5
|
66782a183c583e3056e2c40d8d95568f4abb9537
|
https://github.com/Aaron09/torchfactor/tree/66782a183c583e3056e2c40d8d95568f4abb9537
|
PolicyNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PolicyNet(nn.Module):
    """Small MLP policy network (4 -> 24 -> 36 -> 1).

    The sigmoid output is a probability in (0, 1), e.g. of taking one of
    two discrete actions in a CartPole-style environment.
    """

    def __init__(self):
        super(PolicyNet, self).__init__()
        self.fc1 = nn.Linear(4, 24)
        self.fc2 = nn.Linear(24, 36)
        self.fc3 = nn.Linear(36, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # torch.sigmoid replaces the deprecated F.sigmoid (identical math,
        # no DeprecationWarning on modern torch).
        x = torch.sigmoid(self.fc3(x))
        return x
def get_inputs():
    """Sample forward() input used for tracing/benchmarking."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Positional args and keyword args used to construct the module."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU for fc1 (24 units); also records the <=0 mask
    # used by the autograd backward pass.
    xnumel = 1536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 24  # one bias element per fc1 output unit
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Same fused bias-add + ReLU (+ backward mask), sized for fc2 (36 units).
    xnumel = 2304
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 36
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # In-place scalar bias add + sigmoid for the single fc3 output.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    # Compiled PolicyNet forward: three linears with fused bias/activation.
    # args: (fc1.w, fc1.b, input, fc2.w, fc2.b, fc3.w, fc3.b).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (24, 4), (4, 1))
    assert_size_stride(primals_2, (24,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (36, 24), (24, 1))
    assert_size_stride(primals_5, (36,), (1,))
    assert_size_stride(primals_6, (1, 36), (36, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # fc1: input flattened to (64, 4); bias handled by the fused kernel.
        buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf1,
            primals_2, buf7, 1536, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # fc2.
        buf2 = empty_strided_cuda((64, 36), (36, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0),
            reinterpret_tensor(primals_4, (24, 36), (1, 24), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 36), (576, 144, 36, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(2304)](buf3,
            primals_5, buf6, 2304, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        # fc3 + sigmoid.
        buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 36), (36, 1), 0),
            reinterpret_tensor(primals_6, (36, 1), (1, 36), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf4
        triton_poi_fused_sigmoid_2[grid(64)](buf5, primals_7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_7
    return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(
        buf3, (64, 36), (36, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class PolicyNetNew(nn.Module):
    """Triton-compiled variant of PolicyNet (4 -> 24 -> 36 -> 1 + sigmoid)."""

    def __init__(self):
        super(PolicyNetNew, self).__init__()
        self.fc1 = nn.Linear(4, 24)
        self.fc2 = nn.Linear(24, 36)
        self.fc3 = nn.Linear(36, 1)

    def forward(self, input_0):
        # The compiled graph expects (fc1.w, fc1.b, input, fc2.w, fc2.b,
        # fc3.w, fc3.b) in exactly this order.
        params = [self.fc1.weight, self.fc1.bias, input_0,
            self.fc2.weight, self.fc2.bias,
            self.fc3.weight, self.fc3.bias]
        return call(params)[0]
|
Alfo5123/ConcreteDropout
|
PolicyNet
| false
| 16,878
|
[
"MIT"
] | 7
|
c442871553e20a2de078c0fbac7fa52302d50abf
|
https://github.com/Alfo5123/ConcreteDropout/tree/c442871553e20a2de078c0fbac7fa52302d50abf
|
Conv2dSame
|
import torch
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
from typing import Optional
from typing import Tuple
import torch.nn.parallel
import torch.optim
def _calc_same_pad(input_: 'int', kernel: 'int', stride: 'int', dilation: 'int'
):
"""calculate same padding"""
return max((-(input_ // -stride) - 1) * stride + (kernel - 1) *
dilation + 1 - input_, 0)
def conv2d_same(input_, weight: 'torch.Tensor', bias:
'Optional[torch.Tensor]'=None, stride: 'Tuple[int, int]'=(1, 1),
padding: 'Tuple[int, int]'=(0, 0), dilation: 'Tuple[int, int]'=(1, 1),
groups: 'int'=1):
"""conv2d with same padding"""
input_height, input_width = input_.size()[-2:]
kernel_height, kernel_width = weight.size()[-2:]
pad_h = _calc_same_pad(input_height, kernel_height, stride[0], dilation[0])
pad_w = _calc_same_pad(input_width, kernel_width, stride[1], dilation[1])
input_ = F.pad(input_, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2])
return F.conv2d(input_, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
    """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True):
        # Force padding=0 in the base class; the 'SAME' amount is computed
        # dynamically per input inside forward().
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
            dilation, groups, bias)

    def forward(self, input_):
        # Delegate to the functional SAME-padding conv with this layer's
        # parameters.
        return conv2d_same(
            input_,
            self.weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
def get_inputs():
    """Sample forward() input used for tracing/benchmarking."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Positional args and keyword args used to construct the module."""
    kwargs = {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
from typing import Optional
from typing import Tuple
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Zero-pads each 4x4 plane to 7x7: 1 pixel on top/left, 2 on
    # bottom/right (SAME padding for a 4x4 kernel, stride 1).
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 7 % 7
    x0 = xindex % 7
    x2 = xindex // 49
    x4 = xindex
    # In-bounds test against the original 4x4 extent; out-of-range
    # positions read 0.0 via the masked load below.
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = -1 + x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
        other=0.0)
    tl.store(out_ptr0 + x4, tmp11, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after the external convolution kernel.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    # Compiled Conv2dSame forward: explicit SAME pad, conv, then bias add.
    # args: (weight (4,4,4,4), bias (4,), input (4,4,4,4)).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(784)](primals_3, buf0, 784,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        # Convolution over the padded input; bias applied by the next kernel.
        buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf2, primals_1, buf0
def _calc_same_pad(input_: 'int', kernel: 'int', stride: 'int', dilation: 'int'
):
"""calculate same padding"""
return max((-(input_ // -stride) - 1) * stride + (kernel - 1) *
dilation + 1 - input_, 0)
def conv2d_same(input_, weight: 'torch.Tensor', bias:
'Optional[torch.Tensor]'=None, stride: 'Tuple[int, int]'=(1, 1),
padding: 'Tuple[int, int]'=(0, 0), dilation: 'Tuple[int, int]'=(1, 1),
groups: 'int'=1):
"""conv2d with same padding"""
input_height, input_width = input_.size()[-2:]
kernel_height, kernel_width = weight.size()[-2:]
pad_h = _calc_same_pad(input_height, kernel_height, stride[0], dilation[0])
pad_w = _calc_same_pad(input_width, kernel_width, stride[1], dilation[1])
input_ = F.pad(input_, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2])
return F.conv2d(input_, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSameNew(nn.Conv2d):
    """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions
    (Triton-compiled forward path).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True):
        # Force padding=0 in the base class; the compiled graph pads
        # explicitly before the convolution.
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
            dilation, groups, bias)

    def forward(self, input_0):
        # The compiled graph expects (weight, bias, input).
        return call([self.weight, self.bias, input_0])[0]
|
Adlik/zen_nas
|
Conv2dSame
| false
| 16,879
|
[
"Apache-2.0"
] | 7
|
d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
https://github.com/Adlik/zen_nas/tree/d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
TokenEmbedding
|
import torch
import torch.nn as nn
class TokenEmbedding(nn.Module):
    """Embeds a value sequence via a circular 1-D convolution.

    Input is (batch, seq_len, c_in); output is (batch, seq_len, d_model).
    """

    def __init__(self, c_in, d_model):
        super(TokenEmbedding, self).__init__()
        # torch >= 1.5 changed Conv1d circular-padding sizing; older
        # versions need padding=2 to keep the sequence length with a
        # kernel of size 3.
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
            kernel_size=3, padding=padding, padding_mode='circular')
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, x):
        # (B, L, C) -> (B, C, L) for Conv1d, then back to (B, L, d_model).
        channels_first = x.permute(0, 2, 1)
        return self.tokenConv(channels_first).transpose(1, 2)
def get_inputs():
    """Sample forward() input used for tracing/benchmarking."""
    shape = [4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Positional args and keyword args used to construct the module."""
    kwargs = {'c_in': 4, 'd_model': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Builds the circularly padded, channel-first buffer (4, 4, 6) from the
    # (4, 4, 4) input in one pass: positions 0 and 5 wrap around the
    # length-4 sequence, positions 1-4 are the original values.
    # NOTE(review): the float('nan') sentinels mark branches that are
    # unreachable for valid indices and are always overwritten by tl.where.
    ynumel = 24
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    y0 = yindex % 6  # padded sequence position (0..5)
    x2 = xindex
    y1 = yindex // 6
    tmp0 = y0
    tmp1 = tl.full([1, 1], 5, tl.int64)
    tmp2 = tmp0 >= tmp1
    # Right pad (y0 == 5): wraps to sequence position 0.
    tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK])
    tmp4 = tl.full([1, 1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tmp5 & tmp2
    tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
    tmp8 = tmp7 >= tmp4
    tmp9 = tmp7 < tmp1
    tmp10 = tmp8 & tmp9
    tmp11 = tmp10 & tmp6
    tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp13 = float('nan')
    tmp14 = tl.where(tmp10, tmp12, tmp13)
    tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
    tmp16 = tl.where(tmp6, tmp14, tmp15)
    tmp17 = tmp3 >= tmp4
    tmp18 = tmp3 < tmp1
    tmp19 = tmp17 & tmp18
    tmp20 = tmp19 & tmp2
    tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp22 = tl.where(tmp19, tmp21, tmp13)
    tmp23 = tl.where(tmp5, tmp16, tmp22)
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp2, tmp23, tmp24)
    # Left pad (y0 == 0): wraps to the last sequence position.
    tmp26 = tmp0 < tmp4
    tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
    tmp28 = tmp27 >= tmp4
    tmp29 = tmp27 < tmp1
    tmp30 = tmp28 & tmp29
    tmp31 = tmp30 & tmp26
    tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp33 = tl.where(tmp30, tmp32, tmp13)
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp26, tmp33, tmp34)
    # Interior (1 <= y0 < 5): direct copy from the input.
    tmp36 = tmp0 >= tmp4
    tmp37 = tmp0 < tmp1
    tmp38 = tmp36 & tmp37
    tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp40 = tl.where(tmp38, tmp39, tmp13)
    tmp41 = tl.where(tmp26, tmp35, tmp40)
    tmp42 = tl.where(tmp2, tmp25, tmp41)
    # Store transposed: (batch, channel, padded_seq) layout for Conv1d.
    tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after the external Conv1d kernel.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4  # output-channel index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    # Compiled TokenEmbedding forward: circular pad + permute, Conv1d,
    # bias add, and a strided view implementing the final transpose.
    # args: (input (4,4,4), conv weight (4,4,3), conv bias (4,)).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK
            =4, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        # Padding already materialised in buf1, so the conv uses padding=0.
        buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(64)](buf3, primals_3, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    # The (16, 1, 4) strides realise transpose(1, 2) without a copy.
    return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1
class TokenEmbeddingNew(nn.Module):
    """Triton-compiled variant of TokenEmbedding (circular Conv1d embed)."""

    def __init__(self, c_in, d_model):
        super(TokenEmbeddingNew, self).__init__()
        # torch >= 1.5 changed Conv1d circular-padding sizing; older
        # versions need padding=2 to keep the sequence length.
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
            kernel_size=3, padding=padding, padding_mode='circular')
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, input_0):
        # The compiled graph expects (input, conv weight, conv bias).
        return call([input_0, self.tokenConv.weight, self.tokenConv.bias])[0]
|
AdamLohSg/GTA
|
TokenEmbedding
| false
| 16,882
|
[
"Apache-2.0"
] | 8
|
bf6a745a6e28e365466e76360a15ca10ce61e009
|
https://github.com/AdamLohSg/GTA/tree/bf6a745a6e28e365466e76360a15ca10ce61e009
|
FactorizedReduce
|
import torch
import torch.nn as nn
import torch.utils
class FactorizedReduce(nn.Module):
    """NAS-style factorized reduction.

    Applies two stride-2 1x1x1 Conv3d branches — the second on a spatially
    shifted view — and concatenates their outputs along the channel
    dimension, halving resolution without losing grid points.
    """

    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        self.conv_1 = nn.Conv3d(C_in, C_out // 2, 1, stride=2, padding=0,
            bias=False)
        self.conv_2 = nn.Conv3d(C_in, C_out // 2, 1, stride=2, padding=0,
            bias=False)

    def forward(self, x):
        activated = self.relu(x)
        branch_a = self.conv_1(activated)
        # Offset view so the second branch samples the complementary grid.
        branch_b = self.conv_2(activated[:, :, 1:, 1:])
        return torch.cat([branch_a, branch_b], dim=1)
def get_inputs():
    """Sample forward() input used for tracing/benchmarking."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Positional args and keyword args used to construct the module."""
    kwargs = {'C_in': 4, 'C_out': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 8 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-2 + x1) + 8 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
    # Compiled forward for FactorizedReduce: ReLU kernel, two stride-2
    # 1x1x1 Conv3d branches (the second on an offset view), then a channel
    # concat kernel. Runs on CUDA device 0.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2, 4, 1, 1, 1), (4, 1, 1, 1, 1))
    assert_size_stride(primals_3, (2, 4, 1, 1, 1), (4, 1, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Elementwise ReLU of the input into buf0.
        triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4,
            4, 4), (0, 64, 16, 4, 1), 0), primals_2, stride=(2, 2, 2),
            padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (1, 2, 2, 2, 2), (16, 8, 4, 2, 1))
        # Second branch: same conv on a view offset by 5 elements,
        # i.e. the x[:, :, 1:, 1:] shift of the eager forward.
        buf2 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4,
            3, 3), (0, 64, 16, 4, 1), 5), primals_3, stride=(2, 2, 2),
            padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (1, 2, 2, 2, 2), (16, 8, 4, 2, 1))
        buf3 = empty_strided_cuda((2, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        # Concatenate the two branch outputs along the channel dimension.
        triton_poi_fused_cat_1[grid(32)](buf1, buf2, buf3, 32, XBLOCK=32,
            num_warps=1, num_stages=1)
        del buf1
        del buf2
    return buf3, primals_2, primals_3, reinterpret_tensor(buf0, (1, 4, 4, 4,
        4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf0, (1, 4, 4, 3,
        3), (256, 64, 16, 4, 1), 5)
class FactorizedReduceNew(nn.Module):
    """FactorizedReduce whose forward pass dispatches to the compiled
    Triton/inductor graph in `call` instead of eager PyTorch ops."""

    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduceNew, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        half = C_out // 2
        self.conv_1 = nn.Conv3d(C_in, half, 1, stride=2, padding=0, bias=False)
        self.conv_2 = nn.Conv3d(C_in, half, 1, stride=2, padding=0, bias=False)

    def forward(self, input_0):
        # The compiled graph expects (input, conv_1 weight, conv_2 weight).
        result = call([input_0, self.conv_1.weight, self.conv_2.weight])
        return result[0]
|
Alison-brie/AutoReg
|
FactorizedReduce
| false
| 16,883
|
[
"MIT"
] | 10
|
a23d45a6f7c6e47f61430e1565dda316452a4418
|
https://github.com/Alison-brie/AutoReg/tree/a23d45a6f7c6e47f61430e1565dda316452a4418
|
TransitionUp
|
import torch
import torch.nn as nn
def center_crop(layer, max_height, max_width):
    """Crop an (N, C, H, W) tensor to (max_height, max_width) around its
    spatial center."""
    _, _, height, width = layer.size()
    left = (width - max_width) // 2
    top = (height - max_height) // 2
    return layer[:, :, top:top + max_height, left:left + max_width]
class TransitionUp(nn.Module):
    """Upsample via a transposed conv, center-crop to the skip tensor's
    spatial size, then concatenate with the skip along channels."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.convTrans = nn.ConvTranspose2d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=3, stride=2, padding=0,
            bias=True)

    def forward(self, x, skip):
        upsampled = self.convTrans(x)
        cropped = center_crop(upsampled, skip.size(2), skip.size(3))
        return torch.cat([cropped, skip], 1)
def get_inputs():
    """Example forward inputs: the tensor to upsample and the skip tensor."""
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Example constructor arguments: positional args and keyword args."""
    init_args = []
    init_kwargs = {'in_channels': 4, 'out_channels': 4}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused bias-add + center-crop + channel concat: output channels 0-3
    # read a cropped 4x4 window of the 9x9 transposed-conv output
    # (in_ptr0, base offset 20 = 2 rows + 2 cols) plus the per-channel
    # bias (in_ptr1); channels 4-7 copy the skip tensor (in_ptr2).
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16 % 8  # output channel index
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x3 = xindex // 128
    x4 = xindex % 16
    x5 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # first 4 channels -> conv branch
    tmp5 = tl.load(in_ptr0 + (20 + x0 + 9 * x1 + 81 * x2 + 324 * x3), tmp4 &
        xmask, other=0.0)
    tmp6 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6  # add the conv bias (deferred from the conv call)
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp13 = tl.load(in_ptr2 + (x4 + 16 * (-4 + x2) + 64 * x3), tmp10 &
        xmask, other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)  # select conv branch vs skip
    tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
    # Compiled forward for TransitionUp: transposed conv (bias deferred),
    # then a single fused kernel doing bias-add + center-crop + concat
    # with the skip tensor. Runs on CUDA device 0.
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Transposed convolution without bias; the bias is folded into the
        # fused cat kernel below.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
        buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](buf0, primals_2, primals_4, buf1,
            512, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        del primals_4
    return buf1, primals_1, primals_3
def center_crop(layer, max_height, max_width):
    """Crop an (N, C, H, W) tensor to (max_height, max_width) around its
    spatial center."""
    _, _, height, width = layer.size()
    left = (width - max_width) // 2
    top = (height - max_height) // 2
    return layer[:, :, top:top + max_height, left:left + max_width]
class TransitionUpNew(nn.Module):
    """TransitionUp whose forward pass runs the compiled inductor graph."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.convTrans = nn.ConvTranspose2d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=3, stride=2, padding=0,
            bias=True)

    def forward(self, input_0, input_1):
        # Graph inputs: (weight, bias, x, skip).
        result = call([self.convTrans.weight, self.convTrans.bias, input_0,
            input_1])
        return result[0]
|
Alfo5123/ConcreteDropout
|
TransitionUp
| false
| 16,885
|
[
"MIT"
] | 7
|
c442871553e20a2de078c0fbac7fa52302d50abf
|
https://github.com/Alfo5123/ConcreteDropout/tree/c442871553e20a2de078c0fbac7fa52302d50abf
|
PositionwiseFeedForward
|
import torch
import torch.nn as nn
class PositionwiseFeedForward(nn.Module):
    """Implements FFN equation."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = nn.GELU()

    def forward(self, x):
        # Linear -> GELU -> dropout -> linear.
        hidden = self.activation(self.w_1(x))
        return self.w_2(self.dropout(hidden))
def get_inputs():
    """Example forward-pass inputs for tracing/benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Example constructor arguments: positional args and keyword args."""
    init_args = []
    init_kwargs = {'d_model': 4, 'd_ff': 4}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476  # 1 / sqrt(2)
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
    # Compiled FFN forward: addmm for w_1 (+bias), exact erf-GELU kernel,
    # then addmm for w_2 (+bias). Dropout is the identity in this graph.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # First linear layer: bias + x @ W1^T, input flattened to (64, 4).
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Second linear layer on the activated hidden state.
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class PositionwiseFeedForwardNew(nn.Module):
    """Implements FFN equation."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForwardNew, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = nn.GELU()

    def forward(self, input_0):
        # Graph inputs: (w_1.weight, w_1.bias, x, w_2.weight, w_2.bias).
        result = call([self.w_1.weight, self.w_1.bias, input_0, self.w_2.
            weight, self.w_2.bias])
        return result[0]
|
Aminah92/saint
|
PositionwiseFeedForward
| false
| 16,886
|
[
"MIT"
] | 7
|
e18f5d5d093dce458c7d427eed4a375021c05bb9
|
https://github.com/Aminah92/saint/tree/e18f5d5d093dce458c7d427eed4a375021c05bb9
|
FF
|
import torch
from torch import nn
class FF(nn.Module):
    """
    Feed-forward in a transformer layer.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.lin_1 = nn.Linear(input_size, hidden_size)
        self.lin_2 = nn.Linear(hidden_size, input_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Expand -> ReLU -> project back to the input size.
        hidden = self.relu(self.lin_1(x))
        return self.lin_2(hidden)
def get_inputs():
    """Example forward-pass inputs for tracing/benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Example constructor arguments: positional args and keyword args."""
    init_args = []
    init_kwargs = {'input_size': 4, 'hidden_size': 4}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU; also emits the (activation <= 0) boolean
    # mask that the autograd backward pass uses to zero gradients.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index (broadcast over the last dim)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    # Compiled FF forward: x @ W1^T (mm), fused bias+ReLU (also saving the
    # backward mask), then addmm for the second linear layer.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        # Bias + ReLU in place on buf1; buf3 receives the backward mask.
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class FFNew(nn.Module):
    """
    Feed-forward in a transformer layer.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.lin_1 = nn.Linear(input_size, hidden_size)
        self.lin_2 = nn.Linear(hidden_size, input_size)
        self.relu = nn.ReLU()

    def forward(self, input_0):
        # Graph inputs: (lin_1.weight, lin_1.bias, x, lin_2.weight,
        # lin_2.bias).
        result = call([self.lin_1.weight, self.lin_1.bias, input_0, self.
            lin_2.weight, self.lin_2.bias])
        return result[0]
|
Altair-inc/behaviour-seq-transformer
|
FF
| false
| 16,887
|
[
"MIT"
] | 10
|
74185eb5588b1e57a936de9901313dddcc10acf4
|
https://github.com/Altair-inc/behaviour-seq-transformer/tree/74185eb5588b1e57a936de9901313dddcc10acf4
|
AdaptiveAvgPool
|
import torch
import uuid
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.parallel
import torch.optim
def _get_right_parentheses_index_(struct_str):
"""get the position of the first right parenthese in string"""
left_paren_count = 0
for index, single_char in enumerate(struct_str):
if single_char == '(':
left_paren_count += 1
elif single_char == ')':
left_paren_count -= 1
if left_paren_count == 0:
return index
else:
pass
return None
class PlainNetBasicBlockClass(nn.Module):
    """BasicBlock base class"""

    def __init__(self, in_channels=None, out_channels=None, stride=1,
        no_create=False, block_name=None, **kwargs):
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.no_create = no_create
        # Auto-generate a unique name when none is supplied.
        if block_name is None:
            block_name = f'uuid{uuid.uuid4().hex}'
        self.block_name = block_name

    def forward(self, input_):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def __str__(self):
        return (type(self).__name__ +
            f'({self.in_channels},{self.out_channels},{self.stride})')

    def __repr__(self):
        return (type(self).__name__ +
            f'({self.block_name}|{self.in_channels},{self.out_channels},{self.stride})'
            )

    def get_output_resolution(self, input_resolution):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def get_FLOPs(self, input_resolution):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def get_model_size(self):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def set_in_channels(self, channels):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    @classmethod
    def create_from_str(cls, struct_str, no_create=False, **kwargs):
        """Parse '<ClassName>([name|]in,out,stride)...' and instantiate cls.

        Returns the new block and the remainder of the string.
        """
        assert PlainNetBasicBlockClass.is_instance_from_str(struct_str)
        close_idx = _get_right_parentheses_index_(struct_str)
        assert close_idx is not None
        body = struct_str[len(cls.__name__ + '('):close_idx]
        sep = body.find('|')
        if sep < 0:
            name = f'uuid{uuid.uuid4().hex}'
        else:
            name = body[0:sep]
            body = body[sep + 1:]
        fields = body.split(',')
        block = cls(in_channels=int(fields[0]), out_channels=int(fields[1]),
            stride=int(fields[2]), block_name=name, no_create=no_create)
        return block, struct_str[close_idx + 1:]

    @classmethod
    def is_instance_from_str(cls, struct_str):
        """True when struct_str encodes an instance of this class."""
        return struct_str.startswith(cls.__name__ + '(') and struct_str[-1
            ] == ')'
class AdaptiveAvgPool(PlainNetBasicBlockClass):
    """Adaptive average pool layer"""

    def __init__(self, out_channels, output_size, no_create=False, **kwargs):
        super().__init__(**kwargs)
        # Pooling preserves the channel count, so in == out here.
        self.in_channels = out_channels
        self.out_channels = out_channels
        self.output_size = output_size
        self.no_create = no_create
        if no_create:
            return
        self.netblock = nn.AdaptiveAvgPool2d(output_size=(output_size,
            output_size))

    def forward(self, input_):
        return self.netblock(input_)

    def __str__(self):
        return (type(self).__name__ +
            f'({self.out_channels // self.output_size ** 2},{self.output_size})'
            )

    def __repr__(self):
        return (type(self).__name__ +
            f'({self.block_name}|{self.out_channels // self.output_size ** 2}, {self.output_size})'
            )

    def get_output_resolution(self, input_resolution):
        # Output resolution is fixed at construction time.
        return self.output_size

    def get_FLOPs(self, input_resolution):
        # Pooling cost is treated as negligible.
        return 0

    def get_model_size(self):
        # No learnable parameters.
        return 0

    def set_in_channels(self, channels):
        self.in_channels = channels
        self.out_channels = channels

    @classmethod
    def create_from_str(cls, struct_str, no_create=False, **kwargs):
        """Parse 'AdaptiveAvgPool([name|]out_channels,output_size)...' text."""
        assert AdaptiveAvgPool.is_instance_from_str(struct_str)
        close_idx = _get_right_parentheses_index_(struct_str)
        assert close_idx is not None
        body = struct_str[len('AdaptiveAvgPool('):close_idx]
        sep = body.find('|')
        if sep < 0:
            name = f'uuid{uuid.uuid4().hex}'
        else:
            name = body[0:sep]
            body = body[sep + 1:]
        fields = body.split(',')
        block = AdaptiveAvgPool(out_channels=int(fields[0]), output_size=
            int(fields[1]), block_name=name, no_create=no_create)
        return block, struct_str[close_idx + 1:]
def get_inputs():
    """Example forward-pass inputs for tracing/benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Example constructor arguments: positional args and keyword args."""
    init_args = []
    init_kwargs = {'out_channels': 4, 'output_size': 4}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import uuid
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Output spatial size equals input size (4x4 -> 4x4), so the adaptive
    # average pool degenerates to an elementwise copy.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
    # Compiled AdaptiveAvgPool forward: output size equals input size, so
    # the whole op reduces to a single copy kernel on CUDA device 0.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__adaptive_avg_pool2d_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
def _get_right_parentheses_index_(struct_str):
"""get the position of the first right parenthese in string"""
left_paren_count = 0
for index, single_char in enumerate(struct_str):
if single_char == '(':
left_paren_count += 1
elif single_char == ')':
left_paren_count -= 1
if left_paren_count == 0:
return index
else:
pass
return None
class PlainNetBasicBlockClass(nn.Module):
    """BasicBlock base class"""

    def __init__(self, in_channels=None, out_channels=None, stride=1,
        no_create=False, block_name=None, **kwargs):
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.no_create = no_create
        # Auto-generate a unique name when none is supplied.
        if block_name is None:
            block_name = f'uuid{uuid.uuid4().hex}'
        self.block_name = block_name

    def forward(self, input_):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def __str__(self):
        return (type(self).__name__ +
            f'({self.in_channels},{self.out_channels},{self.stride})')

    def __repr__(self):
        return (type(self).__name__ +
            f'({self.block_name}|{self.in_channels},{self.out_channels},{self.stride})'
            )

    def get_output_resolution(self, input_resolution):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def get_FLOPs(self, input_resolution):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def get_model_size(self):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    def set_in_channels(self, channels):
        """subclass implementation"""
        raise RuntimeError('Not implemented')

    @classmethod
    def create_from_str(cls, struct_str, no_create=False, **kwargs):
        """Parse '<ClassName>([name|]in,out,stride)...' and instantiate cls.

        Returns the new block and the remainder of the string.
        """
        assert PlainNetBasicBlockClass.is_instance_from_str(struct_str)
        close_idx = _get_right_parentheses_index_(struct_str)
        assert close_idx is not None
        body = struct_str[len(cls.__name__ + '('):close_idx]
        sep = body.find('|')
        if sep < 0:
            name = f'uuid{uuid.uuid4().hex}'
        else:
            name = body[0:sep]
            body = body[sep + 1:]
        fields = body.split(',')
        block = cls(in_channels=int(fields[0]), out_channels=int(fields[1]),
            stride=int(fields[2]), block_name=name, no_create=no_create)
        return block, struct_str[close_idx + 1:]

    @classmethod
    def is_instance_from_str(cls, struct_str):
        """True when struct_str encodes an instance of this class."""
        return struct_str.startswith(cls.__name__ + '(') and struct_str[-1
            ] == ')'
class AdaptiveAvgPoolNew(PlainNetBasicBlockClass):
    """Adaptive average pool layer"""

    def __init__(self, out_channels, output_size, no_create=False, **kwargs):
        super().__init__(**kwargs)
        # Pooling preserves the channel count, so in == out here.
        self.in_channels = out_channels
        self.out_channels = out_channels
        self.output_size = output_size
        self.no_create = no_create
        if no_create:
            return
        self.netblock = nn.AdaptiveAvgPool2d(output_size=(output_size,
            output_size))

    def __str__(self):
        return (type(self).__name__ +
            f'({self.out_channels // self.output_size ** 2},{self.output_size})'
            )

    def __repr__(self):
        return (type(self).__name__ +
            f'({self.block_name}|{self.out_channels // self.output_size ** 2}, {self.output_size})'
            )

    def get_output_resolution(self, input_resolution):
        # Output resolution is fixed at construction time.
        return self.output_size

    def get_FLOPs(self, input_resolution):
        # Pooling cost is treated as negligible.
        return 0

    def get_model_size(self):
        # No learnable parameters.
        return 0

    def set_in_channels(self, channels):
        self.in_channels = channels
        self.out_channels = channels

    @classmethod
    def create_from_str(cls, struct_str, no_create=False, **kwargs):
        """Parse 'AdaptiveAvgPool([name|]out_channels,output_size)...' text."""
        assert AdaptiveAvgPoolNew.is_instance_from_str(struct_str)
        close_idx = _get_right_parentheses_index_(struct_str)
        assert close_idx is not None
        body = struct_str[len('AdaptiveAvgPool('):close_idx]
        sep = body.find('|')
        if sep < 0:
            name = f'uuid{uuid.uuid4().hex}'
        else:
            name = body[0:sep]
            body = body[sep + 1:]
        fields = body.split(',')
        block = AdaptiveAvgPoolNew(out_channels=int(fields[0]), output_size
            =int(fields[1]), block_name=name, no_create=no_create)
        return block, struct_str[close_idx + 1:]

    def forward(self, input_0):
        # Route through the compiled copy kernel.
        return call([input_0])[0]
|
Adlik/zen_nas
|
AdaptiveAvgPool
| false
| 16,888
|
[
"Apache-2.0"
] | 7
|
d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
https://github.com/Adlik/zen_nas/tree/d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
Mish
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""

    @staticmethod
    def forward(x):
        softplus_x = F.softplus(x)
        return x * softplus_x.tanh()
def get_inputs():
    """Example forward-pass inputs for tracing/benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Example constructor arguments: positional args and keyword args."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Fused Mish: x * tanh(softplus(x)). softplus falls back to x itself
    # when x > 20 to avoid exp overflow (same threshold as F.softplus).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)  # log(1 + exp(x)) = softplus(x)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
    # Compiled Mish forward: a single fused mul/softplus/tanh kernel on
    # CUDA device 0.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class MishNew(nn.Module):
    """Mish activation backed by the fused Triton kernel in `call`."""

    def forward(self, input_0):
        return call([input_0])[0]
|
Aditya239233/MDP
|
Mish
| false
| 16,889
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
LayerNorm
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
    """Construct a layernorm module (See citation for details)."""

    def __init__(self, features, eps=1e-06):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # learnable scale
        self.b_2 = nn.Parameter(torch.zeros(features))  # learnable shift
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        # Keep the original operator order: (a_2 * centered) / (std + eps).
        return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
def get_inputs():
    """Example forward-pass inputs for tracing/benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Example constructor arguments: positional args and keyword args."""
    init_args = []
    init_kwargs = {'features': 4}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused LayerNorm over a last dimension of size 4:
    # out = a_2 * (x - mean) / (std + eps) + b_2, where std is the
    # Bessel-corrected sample std (divide by N - 1), matching tensor.std().
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4  # feature index (for scale/shift lookup)
    x2 = xindex
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9  # mean over the 4 features
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11  # scale the centered value by a_2
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24  # unbiased variance (divide by N - 1)
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06  # eps
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30  # add shift b_2
    tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
    # Compiled LayerNorm forward: one fused mean/std/affine kernel on
    # CUDA device 0. Inputs: (x, scale a_2, shift b_2).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
            primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1
class LayerNormNew(nn.Module):
    """Construct a layernorm module (See citation for details)."""

    def __init__(self, features, eps=1e-06):
        super(LayerNormNew, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # learnable scale
        self.b_2 = nn.Parameter(torch.zeros(features))  # learnable shift
        self.eps = eps

    def forward(self, input_0):
        # Graph inputs: (x, scale a_2, shift b_2).
        return call([input_0, self.a_2, self.b_2])[0]
|
Aminah92/saint
|
LayerNorm
| false
| 16,890
|
[
"MIT"
] | 7
|
e18f5d5d093dce458c7d427eed4a375021c05bb9
|
https://github.com/Aminah92/saint/tree/e18f5d5d093dce458c7d427eed4a375021c05bb9
|
GCN
|
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.nn.parallel
import torch.optim
from math import *
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init with bound 1/sqrt(out_features).
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        # Flatten leading dims, project features, then propagate with the
        # batched adjacency matrix.
        flat = input.view(-1, input.size(-1)).contiguous()
        support = torch.mm(flat, self.weight)
        support = support.view(adj.size(0), -1, support.size(-1)).contiguous()
        propagated = torch.bmm(adj, support)
        if self.bias is None:
            return propagated
        return propagated + self.bias

    def __repr__(self):
        return (self.__class__.__name__ + ' (' + str(self.in_features) +
            ' -> ' + str(self.out_features) + ')')
class GCN(nn.Module):
    """Two-layer GCN: GraphConvolution -> ReLU -> dropout ->
    GraphConvolution."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, adj):
        hidden = F.relu(self.gc1(x, adj))
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        return self.gc2(hidden, adj)
def get_inputs():
    """Example forward inputs: feature tensor and batched adjacency."""
    return [torch.rand(4, 4, 4), torch.rand(4, 4, 4)]


def get_init_inputs():
    """Example constructor arguments: positional args and keyword args."""
    init_args = []
    init_kwargs = {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.parallel
import torch.optim
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU; also stores the (activation <= 0) boolean
    # mask used by the autograd backward pass.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index (broadcast over the last dim)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place add of a 4-element bias broadcast over the last dimension.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
# Inductor-generated entry point for the two-layer GCN forward:
# mm (x @ W1) -> bmm (adj @ support) -> bias+ReLU -> mm (@ W2) ->
# bmm (adj @ support) -> bias-add. Returns the output followed by
# tensors saved for the backward pass. NOTE(review): the compiled
# graph contains no dropout op (inference-style trace).
def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # support = x.view(16, 4) @ W1
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            primals_2, out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # batched adj @ support
        extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (
            16, 4, 1), 0), out=buf1)
        buf2 = buf1
        del buf1
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        get_raw_stream(0)
        # Bias + ReLU in place; buf6 keeps the mask for ReLU backward.
        triton_poi_fused_add_relu_threshold_backward_0[grid(64)](buf2,
            primals_4, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
        # buf0 is reused as the second-layer matmul output.
        buf3 = buf0
        del buf0
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            primals_5, out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(primals_3, reinterpret_tensor(buf3, (4, 4, 4), (
            16, 4, 1), 0), out=buf4)
        del buf3
        buf5 = buf4
        del buf4
        # Second-layer bias-add (no activation).
        triton_poi_fused_add_1[grid(64)](buf5, primals_6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_6
    return buf5, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
        ), reinterpret_tensor(buf2, (4, 16), (1, 4), 0), reinterpret_tensor(
        primals_5, (4, 4), (1, 4), 0), buf6, reinterpret_tensor(primals_1,
        (4, 16), (1, 4), 0)
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Uniform init scaled by 1/sqrt(out_features), per the original GCN code.
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)
    def forward(self, input, adj):
        # Flatten leading dims, project features, then re-batch for bmm.
        flat = input.view(-1, input.size(-1)).contiguous()
        support = torch.mm(flat, self.weight)
        batched = support.view(adj.size(0), -1, support.size(-1)).contiguous()
        output = torch.bmm(adj, batched)
        if self.bias is None:
            return output
        return output + self.bias
    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__, str(self.
            in_features), str(self.out_features))
class GCNNew(nn.Module):
    """Two-layer GCN whose forward dispatches to the Triton-compiled call().

    NOTE(review): self.dropout is stored but the compiled call() graph
    contains no dropout op, so it has no effect here.
    """
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCNNew, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout
    def forward(self, input_0, input_1):
        # Collect parameters in the positional order call() expects.
        primals_2 = self.gc1.weight
        primals_4 = self.gc1.bias
        primals_5 = self.gc2.weight
        primals_6 = self.gc2.bias
        primals_1 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        # call() also returns backward-saved tensors; only the output is used.
        return output[0]
|
Alvin-Zeng/GCM
|
GCN
| false
| 16,891
|
[
"BSD-3-Clause"
] | 6
|
521de2a290ace289cdc5935195d0284f717504c3
|
https://github.com/Alvin-Zeng/GCM/tree/521de2a290ace289cdc5935195d0284f717504c3
|
TemporalEmbedding
|
import math
import torch
import torch.nn as nn
class FixedEmbedding(nn.Module):
    """Non-trainable sinusoidal embedding table (positional-encoding style).

    Row i holds interleaved sin/cos values of i times the standard
    1/10000^(2j/d_model) frequencies, exposed through nn.Embedding.
    """
    def __init__(self, c_in, d_model):
        super(FixedEmbedding, self).__init__()
        w = torch.zeros(c_in, d_model).float()
        w.require_grad = False
        position = torch.arange(0, c_in).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
            10000.0) / d_model)).exp()
        w[:, 0::2] = torch.sin(position * div_term)
        w[:, 1::2] = torch.cos(position * div_term)
        self.emb = nn.Embedding(c_in, d_model)
        self.emb.weight = nn.Parameter(w, requires_grad=False)
    def forward(self, x):
        # detach(): the table is frozen, so no gradients flow through it.
        return self.emb(x).detach()
class TemporalEmbedding(nn.Module):
    """Sum of calendar-feature embeddings.

    The last axis of x is indexed as 0=month, 1=day, 2=weekday, 3=hour;
    channels 4 (minute) and 5 (second) are consumed only for datasets that
    construct the corresponding tables (ETTm/SolarEnergy/WADI/SMAP).
    """
    def __init__(self, d_model, embed_type='fixed', data='ETTh'):
        super(TemporalEmbedding, self).__init__()
        minute_size = 4
        hour_size = 24
        weekday_size = 7
        day_size = 32
        month_size = 13
        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
        if data == 'ETTm':
            self.minute_embed = Embed(minute_size, d_model)
        elif data == 'SolarEnergy':
            minute_size = 6
            self.minute_embed = Embed(minute_size, d_model)
        elif data == 'WADI':
            minute_size = 60
            second_size = 6
            self.minute_embed = Embed(minute_size, d_model)
            self.second_emebd = Embed(second_size, d_model)
        elif data == 'SMAP':
            minute_size = 60
            second_size = 15
            self.minute_embed = Embed(minute_size, d_model)
            self.second_emebd = Embed(second_size, d_model)
        self.hour_embed = Embed(hour_size, d_model)
        self.weekday_embed = Embed(weekday_size, d_model)
        self.day_embed = Embed(day_size, d_model)
        self.month_embed = Embed(month_size, d_model)
    def forward(self, x):
        x = x.long()
        # BUG FIX: __init__ stores the table under the (misspelled) name
        # 'second_emebd', but this check previously looked up 'second_embed'
        # and therefore never fired, silently dropping the second embedding
        # for WADI/SMAP. Check the real attribute name; the misspelling is
        # kept so existing checkpoints/callers keep working.
        second_x = self.second_emebd(x[:, :, 5]) if hasattr(self,
            'second_emebd') else 0.0
        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
            'minute_embed') else 0.0
        hour_x = self.hour_embed(x[:, :, 3])
        weekday_x = self.weekday_embed(x[:, :, 2])
        day_x = self.day_embed(x[:, :, 1])
        month_x = self.month_embed(x[:, :, 0])
        return hour_x + weekday_x + day_x + month_x + minute_x + second_x
def get_inputs():
    """One random (4, 4, 4, 4) time-feature tensor for the forward pass."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args: no positionals, d_model=4."""
    return [[], {'d_model': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Fused Triton kernel: four embedding-table gathers (hour, weekday, day,
# month) plus their sum, in one pass over the (4, 4, 4, 4) output.
# in_ptr0 is the raw float time-feature tensor; in_ptr1..in_ptr4 are the
# weight tables with 24/7/32/13 rows. Each index is cast to int64, wrapped
# for negatives, and bounds-checked before the gather. The two trailing
# "+ 0.0" adds are the compiled-out minute/second contributions.
@triton.jit
def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x0 = xindex % 4
    x4 = xindex
    # Input channels 3/2/1/0 (offsets 12/8/4/0) -> hour/weekday/day/month.
    tmp0 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp8 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp17 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    # Hour lookup (table of 24 rows).
    tmp1 = tmp0.to(tl.int64)
    tmp2 = tl.full([XBLOCK], 24, tl.int32)
    tmp3 = tmp1 + tmp2
    tmp4 = tmp1 < 0
    tmp5 = tl.where(tmp4, tmp3, tmp1)
    tl.device_assert((0 <= tmp5) & (tmp5 < 24) | ~xmask,
        'index out of bounds: 0 <= tmp5 < 24')
    tmp7 = tl.load(in_ptr1 + (x0 + 4 * tmp5), xmask)
    # Weekday lookup (7 rows).
    tmp9 = tmp8.to(tl.int64)
    tmp10 = tl.full([XBLOCK], 7, tl.int32)
    tmp11 = tmp9 + tmp10
    tmp12 = tmp9 < 0
    tmp13 = tl.where(tmp12, tmp11, tmp9)
    tl.device_assert((0 <= tmp13) & (tmp13 < 7) | ~xmask,
        'index out of bounds: 0 <= tmp13 < 7')
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * tmp13), xmask)
    tmp16 = tmp7 + tmp15
    # Day lookup (32 rows).
    tmp18 = tmp17.to(tl.int64)
    tmp19 = tl.full([XBLOCK], 32, tl.int32)
    tmp20 = tmp18 + tmp19
    tmp21 = tmp18 < 0
    tmp22 = tl.where(tmp21, tmp20, tmp18)
    tl.device_assert((0 <= tmp22) & (tmp22 < 32) | ~xmask,
        'index out of bounds: 0 <= tmp22 < 32')
    tmp24 = tl.load(in_ptr3 + (x0 + 4 * tmp22), xmask)
    tmp25 = tmp16 + tmp24
    # Month lookup (13 rows).
    tmp27 = tmp26.to(tl.int64)
    tmp28 = tl.full([XBLOCK], 13, tl.int32)
    tmp29 = tmp27 + tmp28
    tmp30 = tmp27 < 0
    tmp31 = tl.where(tmp30, tmp29, tmp27)
    tl.device_assert((0 <= tmp31) & (tmp31 < 13) | ~xmask,
        'index out of bounds: 0 <= tmp31 < 13')
    tmp33 = tl.load(in_ptr4 + (x0 + 4 * tmp31), xmask)
    tmp34 = tmp25 + tmp33
    tmp35 = 0.0
    tmp36 = tmp34 + tmp35
    tmp37 = tmp36 + tmp35
    tl.store(out_ptr0 + x4, tmp37, xmask)
# Inductor entry point: validates input shapes/strides, launches the fused
# embedding-sum kernel on device 0, and releases the inputs.
def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (24, 4), (4, 1))
    assert_size_stride(arg2_1, (7, 4), (4, 1))
    assert_size_stride(arg3_1, (32, 4), (4, 1))
    assert_size_stride(arg4_1, (13, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_embedding_0[grid(256)](arg0_1, arg1_1, arg2_1,
            arg3_1, arg4_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
        del arg4_1
    return buf0,
class FixedEmbedding(nn.Module):
    """Frozen sinusoidal lookup table exposed through nn.Embedding."""
    def __init__(self, c_in, d_model):
        super(FixedEmbedding, self).__init__()
        table = torch.zeros(c_in, d_model).float()
        table.require_grad = False
        idx = torch.arange(0, c_in).float().unsqueeze(1)
        freq = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) /
            d_model)).exp()
        angles = idx * freq
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        self.emb = nn.Embedding(c_in, d_model)
        self.emb.weight = nn.Parameter(table, requires_grad=False)
    def forward(self, x):
        # The table is constant, so the lookup is detached from autograd.
        return self.emb(x).detach()
class TemporalEmbeddingNew(nn.Module):
    """Calendar-feature embedding whose forward runs the compiled call().

    NOTE(review): only the hour/weekday/day/month tables are passed to
    call(); any minute/second tables constructed below are ignored by the
    compiled graph. Also, 'second_emebd' is misspelled here exactly as in
    the original module.
    """
    def __init__(self, d_model, embed_type='fixed', data='ETTh'):
        super(TemporalEmbeddingNew, self).__init__()
        minute_size = 4
        hour_size = 24
        weekday_size = 7
        day_size = 32
        month_size = 13
        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
        if data == 'ETTm':
            self.minute_embed = Embed(minute_size, d_model)
        elif data == 'SolarEnergy':
            minute_size = 6
            self.minute_embed = Embed(minute_size, d_model)
        elif data == 'WADI':
            minute_size = 60
            second_size = 6
            self.minute_embed = Embed(minute_size, d_model)
            self.second_emebd = Embed(second_size, d_model)
        elif data == 'SMAP':
            minute_size = 60
            second_size = 15
            self.minute_embed = Embed(minute_size, d_model)
            self.second_emebd = Embed(second_size, d_model)
        self.hour_embed = Embed(hour_size, d_model)
        self.weekday_embed = Embed(weekday_size, d_model)
        self.day_embed = Embed(day_size, d_model)
        self.month_embed = Embed(month_size, d_model)
    def forward(self, input_0):
        # .emb.weight assumes FixedEmbedding (embed_type='fixed');
        # a plain nn.Embedding has no .emb attribute — TODO confirm callers.
        arg1_1 = self.hour_embed.emb.weight
        arg2_1 = self.weekday_embed.emb.weight
        arg3_1 = self.day_embed.emb.weight
        arg4_1 = self.month_embed.emb.weight
        arg0_1 = input_0
        output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
        return output[0]
|
AdamLohSg/GTA
|
TemporalEmbedding
| false
| 16,892
|
[
"Apache-2.0"
] | 8
|
bf6a745a6e28e365466e76360a15ca10ce61e009
|
https://github.com/AdamLohSg/GTA/tree/bf6a745a6e28e365466e76360a15ca10ce61e009
|
Hardswish
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Hardswish(nn.Module):
    """Hard-swish activation: x * relu6(x + 3) / 6, in export-friendly form."""
    @staticmethod
    def forward(x):
        gate = F.hardtanh(x + 3, 0.0, 6.0)
        return x * gate / 6.0
def get_inputs():
    """One random (4, 4, 4, 4) activation tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Hardswish takes no constructor arguments."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Fused Triton kernel for hard-swish: out = x * clamp(x + 3, 0, 6) / 6,
# elementwise over the flattened 256-element input.
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    # hardtanh(x + 3, 0, 6) == clamp to [0, 6].
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp0 * tmp6
    # Multiply by precomputed 1/6 instead of dividing.
    tmp8 = 0.16666666666666666
    tmp9 = tmp7 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)
# Inductor entry point: one fused elementwise kernel over the whole input.
def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class HardswishNew(nn.Module):
    """Hard-swish activation backed by the Triton-compiled call()."""
    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
Aditya239233/MDP
|
Hardswish
| false
| 16,893
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
MDN
|
from torch.nn import Module
import torch
from torch.nn.modules import Module
from torch.nn.modules import Linear
class MDN(Module):
    """Mixture density network head.

    A single linear layer produces 1 + 6*num_mixtures raw values per step:
    one end-of-stroke logit followed by six chunks (pi, mu1, mu2, sigma1,
    sigma2, rho), each of width num_mixtures.
    """
    def __init__(self, input_size, num_mixtures):
        super(MDN, self).__init__()
        self.input_size = input_size
        self.num_mixtures = num_mixtures
        self.parameter_layer = Linear(in_features=input_size, out_features=
            1 + 6 * num_mixtures)
    def forward(self, input_, bias=None):
        raw = self.parameter_layer(input_)
        eos_hat = raw[:, :, 0:1]
        pi_hat, mu1, mu2, sigma1_hat, sigma2_hat, rho_hat = torch.chunk(raw
            [:, :, 1:], 6, dim=2)
        rho = torch.tanh(rho_hat)
        # A zero bias leaves pi/sigma untouched; a positive bias sharpens
        # the mixture (sampling "temperature" control).
        if bias is None:
            bias = torch.zeros_like(rho)
        eos = torch.sigmoid(-eos_hat)
        pi = torch.softmax(pi_hat * (1 + bias), dim=2)
        sigma1 = torch.exp(sigma1_hat - bias)
        sigma2 = torch.exp(sigma2_hat - bias)
        return eos, pi, mu1, mu2, sigma1, sigma2, rho
    def __repr__(self):
        template = '{name}(input_size={input_size}, num_mixtures={num_mixtures})'
        return template.format(name=self.__class__.__name__, **self.__dict__)
def get_inputs():
    """One random (4, 4, 4) sequence batch."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor args for a 4-feature, 4-mixture MDN."""
    return [[], {'input_size': 4, 'num_mixtures': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
from torch.nn.modules import Module
from torch.nn.modules import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# MDN kernels. The linear layer emits 25 values per step (stride 25 =
# 1 + 6*4): offset 0 is the eos logit, then four-wide chunks at offsets
# 1 (pi), 5 (mu1), 9 (mu2), 13 (sigma1), 17 (sigma2), 21 (rho).
# eos = sigmoid(-raw[:, :, 0]).
@triton.jit
def triton_poi_fused_neg_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 25 * x0, xmask, eviction_policy='evict_last')
    tmp1 = -tmp0
    tmp2 = tl.sigmoid(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)
# rho = tanh(raw[:, :, 21:25]).
@triton.jit
def triton_poi_fused_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (21 + x0 + 25 * x1), xmask)
    tmp1 = libdevice.tanh(tmp0)
    tl.store(out_ptr0 + x2, tmp1, xmask)
# Softmax numerator over the pi logits raw[:, :, 1:5]:
# exp(x - rowmax) for numerical stability.
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (1 + x0 + 25 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + 25 * x1), xmask, eviction_policy='evict_last'
        )
    tmp2 = tl.load(in_ptr0 + (2 + 25 * x1), xmask, eviction_policy='evict_last'
        )
    tmp4 = tl.load(in_ptr0 + (3 + 25 * x1), xmask, eviction_policy='evict_last'
        )
    tmp6 = tl.load(in_ptr0 + (4 + 25 * x1), xmask, eviction_policy='evict_last'
        )
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
# Softmax denominator/normalization: divide by the row sum.
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
# sigma1 = exp(raw[:, :, 13:17]) — the zero-bias path of the original forward.
@triton.jit
def triton_poi_fused_exp_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (13 + x0 + 25 * x1), xmask)
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + x2, tmp1, xmask)
# sigma2 = exp(raw[:, :, 17:21]).
@triton.jit
def triton_poi_fused_exp_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (17 + x0 + 25 * x1), xmask)
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + x2, tmp1, xmask)
# Inductor entry point for the MDN forward (bias=None path): one addmm for
# the parameter layer, then small elementwise kernels for eos/pi/sigma/rho.
# mu1 and mu2 are returned as strided views into the addmm output
# (offsets 5 and 9 within each 25-wide row).
def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (25, 4), (4, 1))
    assert_size_stride(primals_2, (25,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 25), (25, 1), torch.float32)
        # raw = bias + input.view(16, 4) @ weight.T
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 25), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_neg_sigmoid_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_tanh_1[grid(64)](buf0, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(64)](buf0, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_3[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        # buf3 is recycled for sigma1.
        buf5 = buf3
        del buf3
        triton_poi_fused_exp_4[grid(64)](buf0, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_exp_5[grid(64)](buf0, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
    return buf1, buf4, reinterpret_tensor(buf0, (4, 4, 4), (100, 25, 1), 5
        ), reinterpret_tensor(buf0, (4, 4, 4), (100, 25, 1), 9
        ), buf5, buf6, buf2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
        ), buf1, buf2, buf4, buf5, buf6
class MDNNew(Module):
    """MDN head backed by the Triton-compiled call().

    NOTE(review): only the bias=None path of the original forward is
    compiled; there is no way to pass a sampling bias here.
    """
    def __init__(self, input_size, num_mixtures):
        super(MDNNew, self).__init__()
        self.input_size = input_size
        self.num_mixtures = num_mixtures
        self.parameter_layer = Linear(in_features=input_size, out_features=
            1 + 6 * num_mixtures)
    def __repr__(self):
        s = '{name}(input_size={input_size}, num_mixtures={num_mixtures})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def forward(self, input_0):
        primals_1 = self.parameter_layer.weight
        primals_2 = self.parameter_layer.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        # (eos, pi, mu1, mu2, sigma1, sigma2, rho)
        return output[0], output[1], output[2], output[3], output[4], output[5
            ], output[6]
|
AnesBenmerzoug/Handwriting-Model
|
MDN
| false
| 16,894
|
[
"MIT"
] | 7
|
925a8d43174cccd58e01d41fdc513343df09d000
|
https://github.com/AnesBenmerzoug/Handwriting-Model/tree/925a8d43174cccd58e01d41fdc513343df09d000
|
MetaAconC
|
import torch
import torch.nn as nn
class MetaAconC(nn.Module):
    """ ACON activation (activate or not).
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """
    def __init__(self, c1, k=1, s=1, r=16):
        super().__init__()
        c2 = max(r, c1 // r)
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
    def forward(self, x):
        # Global spatial mean feeds the tiny beta-generator network.
        pooled = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
        beta = torch.sigmoid(self.fc2(self.fc1(pooled)))
        dpx = (self.p1 - self.p2) * x
        gated = dpx * torch.sigmoid(beta * dpx)
        return gated + self.p2 * x
def get_inputs():
    """One random (4, 4, 4, 4) feature map."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args: 4 input channels."""
    return [[], {'c1': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# MetaAconC kernels.
# Mean over the 4x4 spatial plane of each (batch, channel): four column
# sums (offsets 0/4/8/12 etc. within the 16-element plane), each divided
# by 4, then averaged again — i.e. mean over dim 2 then dim 3.
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp14 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp18 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp27 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp28 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp30 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp16 = tmp15 / tmp7
    tmp17 = tmp8 + tmp16
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp24 = tmp22 + tmp23
    tmp25 = tmp24 / tmp7
    tmp26 = tmp17 + tmp25
    tmp29 = tmp27 + tmp28
    tmp31 = tmp29 + tmp30
    tmp33 = tmp31 + tmp32
    tmp34 = tmp33 / tmp7
    tmp35 = tmp26 + tmp34
    tmp36 = tmp35 / tmp7
    tl.store(out_ptr0 + x0, tmp36, xmask)
# In-place bias-add after the fc1 convolution (16 output channels).
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
# In-place bias-add after the fc2 convolution (4 output channels).
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
# Per-channel difference p1 - p2.
@triton.jit
def triton_poi_fused_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
# Final ACON combination: dpx * sigmoid(sigmoid(beta_raw) * dpx) + p2 * x,
# where dpx = (p1 - p2) * x (in_ptr0 is the precomputed p1 - p2).
@triton.jit
def triton_poi_fused_add_mul_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x3 = xindex
    x4 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tl.sigmoid(tmp3)
    tmp5 = tmp4 * tmp2
    tmp6 = tl.sigmoid(tmp5)
    tmp7 = tmp2 * tmp6
    tmp9 = tmp8 * tmp1
    tmp10 = tmp7 + tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)
# Inductor entry point for MetaAconC: spatial mean -> fc1 conv (+bias) ->
# fc2 conv (+bias) -> fused sigmoid/mul/add combination with p1/p2.
def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (4, 16, 1, 1), (16, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        # Global spatial mean feeding the beta-generator convs.
        triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 16, 1, 1), (16, 1, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_2[grid(16)](buf4, primals_5, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        # p1 - p2, computed once per channel.
        buf5 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        triton_poi_fused_sub_3[grid(4)](primals_6, primals_7, buf5, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_6
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_4[grid(256)](buf5, primals_1, buf4,
            primals_7, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
    return buf6, primals_1, primals_2, primals_4, buf0, buf2, buf4, buf5
class MetaAconCNew(nn.Module):
    """ ACON activation (activate or not).
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.

    Forward dispatches to the Triton-compiled call().
    """
    def __init__(self, c1, k=1, s=1, r=16):
        super().__init__()
        # Beta-generator bottleneck width, as in the reference implementation.
        c2 = max(r, c1 // r)
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
    def forward(self, input_0):
        # Parameters in the positional order expected by call().
        primals_6 = self.p1
        primals_7 = self.p2
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
|
Aditya239233/MDP
|
MetaAconC
| false
| 16,895
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
Classify
|
import torch
import torch.nn as nn
def autopad(k, p=None):
    """Return 'same' padding for kernel size k when p is not explicitly given."""
    if p is not None:
        return p
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]
class Classify(nn.Module):
    """Classification head: global average pool -> conv -> flatten.

    Accepts a single tensor or a list of tensors (pooled maps are
    concatenated along the channel axis before the conv).
    """
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        super().__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)
        self.flat = nn.Flatten()
    def forward(self, x):
        inputs = x if isinstance(x, list) else [x]
        pooled = [self.aap(y) for y in inputs]
        z = torch.cat(pooled, 1)
        return self.flat(self.conv(z))
def get_inputs():
    """One random (4, 4, 4, 4) feature map."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args: 4 input and 4 output channels."""
    return [[], {'c1': 4, 'c2': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Adaptive average pool to 1x1: reduce each 16-element spatial plane to its
# mean using a per-row reduction (RBLOCK covers the full plane).
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
# In-place bias-add after the 1x1 classification convolution.
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
# Inductor entry point for Classify: mean-pool to 1x1, conv, bias-add,
# then return the result viewed as a flat (4, 4) matrix.
def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del primals_1
        buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
        buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf2
        triton_poi_fused_convolution_1[grid(16)](buf3, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return reinterpret_tensor(buf3, (4, 4), (4, 1), 0), primals_2, buf1
def autopad(k, p=None):
    """Compute 'same' padding from kernel size k unless p is supplied."""
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [x // 2 for x in k]
class ClassifyNew(nn.Module):
    """Classification head (pool -> conv -> flatten) backed by the compiled
    call(). NOTE(review): unlike the original Classify, this forward takes a
    single tensor only — the list-input path is not compiled.
    """
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        super().__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)
        self.flat = nn.Flatten()
    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
Aditya239233/MDP
|
Classify
| false
| 16,896
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
FirstKernelTensorTrain
|
import torch
from torch import nn
import torch.nn.functional as F
class FirstKernelTensorTrain(nn.Module):
    """First tensor-train kernel: bias-free linear map m -> r_j, then ReLU."""
    def __init__(self, m, r_j):
        super(FirstKernelTensorTrain, self).__init__()
        self.fc1 = nn.Linear(m, r_j, bias=False)
        self.m = m
        self.r_j = r_j
    def forward(self, tensor):
        projected = self.fc1(tensor)
        return F.relu(projected)
def get_inputs():
    """One random (4, 4, 4, 4) input tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args: input width m=4, rank r_j=4."""
    return [[], {'m': 4, 'r_j': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused elementwise pass over 256 values: applies ReLU in place to
# in_out_ptr0 and writes the backward threshold mask (output <= 0) to
# out_ptr0 for reuse by autograd.
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
    """Inductor-generated entry point for FirstKernelTensorTrain.

    args: [fc1 weight (4,4), input (4,4,4,4)]. Computes x @ W^T via cuBLAS,
    then the fused ReLU (+ backward mask) Triton kernel. Returns the
    activated output, the flattened input view, and the threshold mask.
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Flatten the input to (64, 4) and multiply by the transposed weight.
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        # In-place ReLU on buf1; buf2 receives the (out <= 0) mask.
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf2,
            256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2
class FirstKernelTensorTrainNew(nn.Module):
    """Triton-backed variant of FirstKernelTensorTrain; forwards through the
    compiled ``call`` graph instead of eager ops."""

    def __init__(self, m, r_j):
        super(FirstKernelTensorTrainNew, self).__init__()
        self.fc1 = nn.Linear(m, r_j, bias=False)
        self.m = m
        self.r_j = r_j

    def forward(self, input_0):
        weight = self.fc1.weight
        # call() returns (activated output, input view, backward mask).
        result = call([weight, input_0])
        return result[0]
|
AndresOtero/TensorDecompositionMachineLearning
|
FirstKernelTensorTrain
| false
| 16,897
|
[
"MIT"
] | 3
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
https://github.com/AndresOtero/TensorDecompositionMachineLearning/tree/455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
TemporalBlock
|
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class TemporalBlock(nn.Module):
    """TCN residual block: two weight-normalised circular Conv1d layers, each
    followed by ReLU and dropout, plus a residual connection (1x1 conv when
    the channel counts differ)."""

    def __init__(self, n_inputs, n_outputs, kernel_size=3, stride=1,
        dilation=1, padding=1, dropout=0.2):
        super(TemporalBlock, self).__init__()
        conv_kwargs = dict(stride=stride, padding=padding, dilation=
            dilation, padding_mode='circular')
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs,
            kernel_size, **conv_kwargs))
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
            kernel_size, **conv_kwargs))
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(self.conv1, self.relu1, self.dropout1,
            self.conv2, self.relu2, self.dropout2)
        # Match channel counts on the residual path only when necessary.
        self.downsample = None if n_inputs == n_outputs else nn.Conv1d(
            n_inputs, n_outputs, 1)
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Kaiming-initialise every Conv1d weight in this block."""
        for module in self.modules():
            if isinstance(module, nn.Conv1d):
                nn.init.kaiming_normal_(module.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, x):
        out = self.net(x)
        if self.downsample is None:
            res = x
        else:
            res = self.downsample(x)
        return self.relu(out + res)
def get_inputs():
    """Sample forward-pass inputs used by the benchmark harness."""
    return [torch.rand(4, 4)]


def get_init_inputs():
    """Constructor args/kwargs used by the benchmark harness."""
    return [[], dict(n_inputs=4, n_outputs=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Weight-norm reconstruction for a (4, 4, 3) Conv1d weight: per output
# channel (x), reduces the 12 v-elements (r) to their L2 norm, stores the
# norm into in_out_ptr0, and writes the normalised weight g * v / ||v||
# to out_ptr0.
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1,
    out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    rnumel = 12
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 12 * x0), rmask & xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(rmask & xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    # sqrt of the sum of squares: the per-channel weight norm ||v||.
    tmp6 = libdevice.sqrt(tmp5)
    tmp8 = tmp7 / tmp6
    tmp9 = tmp0 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr0 + (r1 + 12 * x0), tmp9, rmask & xmask)
# Builds a length-6 padded copy (out width 6, 4 channels -> 24 elements) of
# the length-4 input sequence. The -1/-5/+3 offset loads are consistent with
# 'circular' Conv1d padding of 1 on each side — presumably materialising the
# wrap-around columns for conv1; confirm against the call() that launches it.
# NaN is the fill for positions that are never selected.
@triton.jit
def triton_poi_fused_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 24
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 5, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = -4 + x0
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tmp5 & tmp2
    tmp7 = tmp0 >= tmp4
    tmp8 = tmp0 < tmp1
    tmp9 = tmp7 & tmp8
    tmp10 = tmp9 & tmp6
    tmp11 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp10 & xmask, other=0.0)
    tmp12 = float('nan')
    tmp13 = tl.where(tmp9, tmp11, tmp12)
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp6, tmp13, tmp14)
    tmp16 = tmp3 >= tmp4
    tmp17 = tmp3 < tmp1
    tmp18 = tmp16 & tmp17
    tmp19 = tmp18 & tmp2
    tmp20 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1), tmp19 & xmask, other=0.0)
    tmp21 = tl.where(tmp18, tmp20, tmp12)
    tmp22 = tl.where(tmp5, tmp15, tmp21)
    tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
    tmp24 = tl.where(tmp2, tmp22, tmp23)
    tmp25 = tmp0 < tmp4
    tmp26 = 4 + x0
    tmp27 = tmp26 >= tmp4
    tmp28 = tmp26 < tmp1
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp25
    tmp31 = tl.load(in_ptr0 + (3 + x0 + 4 * x1), tmp30 & xmask, other=0.0)
    tmp32 = tl.where(tmp29, tmp31, tmp12)
    tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
    tmp34 = tl.where(tmp25, tmp32, tmp33)
    tmp35 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
    tmp36 = tl.where(tmp9, tmp35, tmp12)
    tmp37 = tl.where(tmp25, tmp34, tmp36)
    tmp38 = tl.where(tmp2, tmp24, tmp37)
    tl.store(out_ptr0 + x2, tmp38, xmask)
# Same circular-padding copy as triton_poi_fused_copy_1, but fused with the
# epilogue of conv1: every gathered element is conv1-output + per-channel
# bias (in_ptr1) passed through ReLU (maximum with 0) before being written
# into the padded buffer consumed by conv2.
@triton.jit
def triton_poi_fused_copy_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 24
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 5, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = -4 + x0
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tmp5 & tmp2
    tmp7 = tmp0 >= tmp4
    tmp8 = tmp0 < tmp1
    tmp9 = tmp7 & tmp8
    tmp10 = tmp9 & tmp6
    tmp11 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp10 & xmask, other=0.0)
    tmp12 = tl.load(in_ptr1 + x1, tmp10 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp10, tmp15, tmp16)
    tmp18 = float('nan')
    tmp19 = tl.where(tmp9, tmp17, tmp18)
    tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
    tmp21 = tl.where(tmp6, tmp19, tmp20)
    tmp22 = tmp3 >= tmp4
    tmp23 = tmp3 < tmp1
    tmp24 = tmp22 & tmp23
    tmp25 = tmp24 & tmp2
    tmp26 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1), tmp25 & xmask, other=0.0)
    tmp27 = tl.load(in_ptr1 + x1, tmp25 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp28 = tmp26 + tmp27
    tmp29 = triton_helpers.maximum(tmp14, tmp28)
    tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
    tmp31 = tl.where(tmp25, tmp29, tmp30)
    tmp32 = tl.where(tmp24, tmp31, tmp18)
    tmp33 = tl.where(tmp5, tmp21, tmp32)
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp2, tmp33, tmp34)
    tmp36 = tmp0 < tmp4
    tmp37 = 4 + x0
    tmp38 = tmp37 >= tmp4
    tmp39 = tmp37 < tmp1
    tmp40 = tmp38 & tmp39
    tmp41 = tmp40 & tmp36
    tmp42 = tl.load(in_ptr0 + (3 + x0 + 4 * x1), tmp41 & xmask, other=0.0)
    tmp43 = tl.load(in_ptr1 + x1, tmp41 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp44 = tmp42 + tmp43
    tmp45 = triton_helpers.maximum(tmp14, tmp44)
    tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
    tmp47 = tl.where(tmp41, tmp45, tmp46)
    tmp48 = tl.where(tmp40, tmp47, tmp18)
    tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
    tmp50 = tl.where(tmp36, tmp48, tmp49)
    tmp51 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
    tmp52 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp53 = tmp51 + tmp52
    tmp54 = triton_helpers.maximum(tmp14, tmp53)
    tmp55 = tl.full(tmp54.shape, 0.0, tmp54.dtype)
    tmp56 = tl.where(tmp9, tmp54, tmp55)
    tmp57 = tl.where(tmp9, tmp56, tmp18)
    tmp58 = tl.where(tmp36, tmp50, tmp57)
    tmp59 = tl.where(tmp2, tmp35, tmp58)
    tl.store(out_ptr0 + x2, tmp59, xmask)
# Fused residual epilogue: out = relu(relu(conv2_out + bias) + residual).
# Also writes the two backward threshold masks (<= 0) for the inner and
# outer ReLUs, matching TemporalBlock.forward's relu(out + res).
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = tmp4 + tmp5
    tmp7 = triton_helpers.maximum(tmp3, tmp6)
    tmp8 = 0.0
    tmp9 = tmp4 <= tmp8
    tmp10 = tmp7 <= tmp8
    tl.store(out_ptr0 + x2, tmp7, xmask)
    tl.store(out_ptr1 + x2, tmp9, xmask)
    tl.store(out_ptr2 + x2, tmp10, xmask)
# Recomputes relu(conv1_out + bias) only to emit its backward threshold
# mask (<= 0); the activated values themselves are not stored here.
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor-generated forward for TemporalBlock (eval-mode graph).

    args: [g1, v1, b1, input, g2, v2, b2] — the weight-norm (g, v) pairs and
    biases of conv1/conv2 plus the (4, 4) input. Reconstructs each
    weight-normed kernel, pads circularly, convolves twice with fused
    bias+ReLU epilogues, and adds the identity residual. Returns the block
    output first, followed by saved-for-backward tensors.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_6, (4, 4, 3), (12, 3, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
        get_raw_stream(0)
        # conv1: reconstruct weight-normed kernel (buf2) and its norms (buf1).
        triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2,
            primals_1, buf2, 4, 12, XBLOCK=1, num_warps=2, num_stages=1)
        buf4 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
        # Circularly pad the input from length 4 to 6 for the k=3 conv.
        triton_poi_fused_copy_1[grid(24)](primals_4, buf4, 24, XBLOCK=32,
            num_warps=1, num_stages=1)
        buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 4, 6
            ), (24, 6, 1), 0), buf2, stride=(1,), padding=(0,), dilation=(1
            ,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf5, (1, 4, 4), (16, 4, 1))
        buf6 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
        buf7 = reinterpret_tensor(buf6, (4, 1, 1), (1, 1, 1), 0)
        del buf6
        buf8 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
        # conv2: same weight-norm reconstruction as conv1.
        triton_per_fused__weight_norm_interface_0[grid(4)](buf7, primals_6,
            primals_5, buf8, 4, 12, XBLOCK=1, num_warps=2, num_stages=1)
        buf10 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
        # Fused bias + ReLU of conv1's output, written into the padded
        # buffer that feeds conv2.
        triton_poi_fused_copy_2[grid(24)](buf5, primals_3, buf10, 24,
            XBLOCK=32, num_warps=1, num_stages=1)
        buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (1, 4,
            6), (24, 6, 1), 0), buf8, stride=(1,), padding=(0,), dilation=(
            1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf11, (1, 4, 4), (16, 4, 1))
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        # Residual add + final ReLU, plus both backward masks.
        triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf11,
            primals_7, primals_4, buf12, buf14, buf13, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf11
        del primals_4
        del primals_7
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_4[grid(16)](buf5,
            primals_3, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf5
        del primals_3
    return (buf12, buf2, buf8, primals_1, primals_2, primals_5, primals_6,
        buf1, buf2, reinterpret_tensor(buf4, (1, 4, 6), (24, 6, 1), 0),
        buf7, buf8, reinterpret_tensor(buf10, (1, 4, 6), (24, 6, 1), 0),
        buf13, buf14, buf15)
class TemporalBlockNew(nn.Module):
    """Triton-backed TemporalBlock: same parameters/layers as the eager
    version, but forward() dispatches to the compiled ``call`` graph."""

    def __init__(self, n_inputs, n_outputs, kernel_size=3, stride=1,
        dilation=1, padding=1, dropout=0.2):
        super(TemporalBlockNew, self).__init__()
        conv_kwargs = dict(stride=stride, padding=padding, dilation=
            dilation, padding_mode='circular')
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs,
            kernel_size, **conv_kwargs))
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
            kernel_size, **conv_kwargs))
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(self.conv1, self.relu1, self.dropout1,
            self.conv2, self.relu2, self.dropout2)
        self.downsample = None if n_inputs == n_outputs else nn.Conv1d(
            n_inputs, n_outputs, 1)
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Kaiming-initialise every Conv1d weight in this block."""
        for module in self.modules():
            if isinstance(module, nn.Conv1d):
                nn.init.kaiming_normal_(module.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, input_0):
        # Gather weight-norm (g, v) pairs and biases in the order call()
        # expects: [g1, v1, b1, x, g2, v2, b2].
        result = call([self.conv1.weight_g, self.conv1.weight_v, self.
            conv1.bias, input_0, self.conv2.weight_g, self.conv2.weight_v,
            self.conv2.bias])
        return result[0]
|
AdamLohSg/GTA
|
TemporalBlock
| false
| 16,898
|
[
"Apache-2.0"
] | 8
|
bf6a745a6e28e365466e76360a15ca10ce61e009
|
https://github.com/AdamLohSg/GTA/tree/bf6a745a6e28e365466e76360a15ca10ce61e009
|
AdaptiveAvgMaxPool2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch._utils
import torch.optim
def adaptive_avgmax_pool2d(x, output_size=1):
    """Mean of adaptive average pooling and adaptive max pooling of *x*."""
    pooled_avg = F.adaptive_avg_pool2d(x, output_size)
    pooled_max = F.adaptive_max_pool2d(x, output_size)
    return 0.5 * (pooled_avg + pooled_max)
class AdaptiveAvgMaxPool2d(nn.Module):
    """Module wrapper around :func:`adaptive_avgmax_pool2d`."""

    def __init__(self, output_size=1):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.output_size = output_size

    def forward(self, x):
        # Delegate to the functional form with the configured output size.
        return adaptive_avgmax_pool2d(x, self.output_size)
def get_inputs():
    """Sample forward-pass inputs used by the benchmark harness."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor args/kwargs used by the benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch._utils
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# One program per (batch, channel) plane of 16 elements: computes the mean
# via a block reduction, the max via a chain of pairwise maxima over the
# same 16 values, and stores 0.5 * (mean + max) — the fused avg+max pool.
@triton.jit
def triton_per_fused_adaptive_max_pool2d_add_mean_mul_0(in_out_ptr0,
    in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp5 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp10 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp14 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp18 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp20 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp24 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp28 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp30 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp32 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp34 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Sum of the 16-element plane for the average-pool half.
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    # Running pairwise maximum over the same 16 elements (max-pool half).
    tmp7 = triton_helpers.maximum(tmp6, tmp5)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp11 = triton_helpers.maximum(tmp10, tmp9)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tmp17 = triton_helpers.maximum(tmp16, tmp15)
    tmp19 = triton_helpers.maximum(tmp18, tmp17)
    tmp21 = triton_helpers.maximum(tmp20, tmp19)
    tmp23 = triton_helpers.maximum(tmp22, tmp21)
    tmp25 = triton_helpers.maximum(tmp24, tmp23)
    tmp27 = triton_helpers.maximum(tmp26, tmp25)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tmp31 = triton_helpers.maximum(tmp30, tmp29)
    tmp33 = triton_helpers.maximum(tmp32, tmp31)
    tmp35 = triton_helpers.maximum(tmp34, tmp33)
    tmp36 = 16.0
    tmp37 = tmp4 / tmp36
    tmp38 = tmp37 + tmp35
    tmp39 = 0.5
    tmp40 = tmp38 * tmp39
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp40, xmask)
def call(args):
    """Inductor-generated forward for AdaptiveAvgMaxPool2d (output_size=1).

    args: [(4, 4, 4, 4) input]. Launches one fused kernel that produces
    0.5 * (avg_pool + max_pool) per (batch, channel) plane.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_adaptive_max_pool2d_add_mean_mul_0[grid(16)](buf2,
            arg0_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del arg0_1
    return buf2,
def adaptive_avgmax_pool2d(x, output_size=1):
    """Mean of adaptive average pooling and adaptive max pooling of *x*."""
    pooled_avg = F.adaptive_avg_pool2d(x, output_size)
    pooled_max = F.adaptive_max_pool2d(x, output_size)
    return 0.5 * (pooled_avg + pooled_max)
class AdaptiveAvgMaxPool2dNew(nn.Module):
    """Triton-backed AdaptiveAvgMaxPool2d; forwards through the compiled
    ``call`` graph (fixed to output_size=1 by the generated kernel)."""

    def __init__(self, output_size=1):
        super(AdaptiveAvgMaxPool2dNew, self).__init__()
        self.output_size = output_size

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
Alicegaz/torchok
|
AdaptiveAvgMaxPool2d
| false
| 16,899
|
[
"Apache-2.0"
] | 8
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
https://github.com/Alicegaz/torchok/tree/7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
AconC
|
import torch
import torch.nn as nn
class AconC(nn.Module):
    """ ACON activation (activate or not).
    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        # Per-channel learnable slopes p1/p2 and switching factor beta.
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, x):
        dpx = (self.p1 - self.p2) * x
        gate = torch.sigmoid(self.beta * dpx)
        return dpx * gate + self.p2 * x
def get_inputs():
    """Sample forward-pass inputs used by the benchmark harness."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor args/kwargs used by the benchmark harness."""
    return [[], dict(c1=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Elementwise subtraction over the 4 channel parameters: out = p1 - p2,
# precomputing the (p1 - p2) factor of the ACON activation.
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
# Main ACON-C elementwise kernel: with dpx = (p1 - p2) * x (in_ptr0 holds
# the precomputed per-channel p1 - p2), computes
# dpx * sigmoid(beta * dpx) + p2 * x, broadcasting channel params over
# the (4, 4, 4, 4) input.
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp2
    tmp5 = tl.sigmoid(tmp4)
    tmp6 = tmp2 * tmp5
    tmp8 = tmp7 * tmp1
    tmp9 = tmp6 + tmp8
    tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
    """Inductor-generated forward for AconC.

    args: [p1, p2, input, beta]. First precomputes p1 - p2, then runs the
    fused ACON-C elementwise kernel over the whole input.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        # buf0 <- p1 - p2 (per channel).
        triton_poi_fused_sub_0[grid(4)](primals_1, primals_2, buf0, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf0, primals_3,
            primals_4, primals_2, buf1, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
    return buf1, primals_3, primals_4, buf0
class AconCNew(nn.Module):
    """ ACON activation (activate or not).
    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, input_0):
        # call() expects [p1, p2, x, beta] and returns the activation first.
        result = call([self.p1, self.p2, input_0, self.beta])
        return result[0]
|
Aditya239233/MDP
|
AconC
| false
| 16,900
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
Conv2dSameExport
|
import torch
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
def _calc_same_pad(input_: 'int', kernel: 'int', stride: 'int', dilation: 'int'
):
"""calculate same padding"""
return max((-(input_ // -stride) - 1) * stride + (kernel - 1) *
dilation + 1 - input_, 0)
def _same_pad_arg(input_size, kernel_size, stride, dilation):
input_height, input_width = input_size
kernel_height, kernel_width = kernel_size
pad_h = _calc_same_pad(input_height, kernel_height, stride[0], dilation[0])
pad_w = _calc_same_pad(input_width, kernel_width, stride[1], dilation[1])
return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
class Conv2dSameExport(nn.Conv2d):
    """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions
    NOTE: This does not currently work with torch.jit.script
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True):
        # Padding is forced to 0 here; SAME padding is applied explicitly
        # via a lazily-created ZeroPad2d in forward().
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
            dilation, groups, bias)
        self.pad = None
        self.pad_input_size = 0, 0

    def forward(self, input_):
        spatial = input_.size()[-2:]
        if self.pad is None:
            # Build the padding module once, from the first input's size.
            pad_arg = _same_pad_arg(spatial, self.weight.size()[-2:],
                self.stride, self.dilation)
            self.pad = nn.ZeroPad2d(pad_arg)
            self.pad_input_size = spatial
        if self.pad is not None:
            input_ = self.pad(input_)
        return F.conv2d(input_, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
def get_inputs():
    """Sample forward-pass inputs used by the benchmark harness."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor args/kwargs used by the benchmark harness."""
    return [[], dict(in_channels=4, out_channels=4, kernel_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Zero-pads each 4x4 plane to 7x7 (1 top/left, 2 bottom/right — matching
# _same_pad_arg for a 4x4 kernel at stride 1); positions outside the
# original plane receive 0.0 via the masked load.
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 7 % 7
    x0 = xindex % 7
    x2 = xindex // 49
    x4 = xindex
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = -1 + x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
        other=0.0)
    tl.store(out_ptr0 + x4, tmp11, xmask)
# Convolution epilogue: adds the per-output-channel bias (in_ptr0, indexed
# by channel x1) to the conv result in place.
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
    """Inductor-generated forward for Conv2dSameExport.

    args: [input, weight, bias]. Zero-pads the input to 7x7 (SAME padding
    for a 4x4 kernel), runs cuDNN convolution, then adds the bias in place.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        # Fused bias add, in place on the convolution output.
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0
def _calc_same_pad(input_: 'int', kernel: 'int', stride: 'int', dilation: 'int'
):
"""calculate same padding"""
return max((-(input_ // -stride) - 1) * stride + (kernel - 1) *
dilation + 1 - input_, 0)
def _same_pad_arg(input_size, kernel_size, stride, dilation):
input_height, input_width = input_size
kernel_height, kernel_width = kernel_size
pad_h = _calc_same_pad(input_height, kernel_height, stride[0], dilation[0])
pad_w = _calc_same_pad(input_width, kernel_width, stride[1], dilation[1])
return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
class Conv2dSameExportNew(nn.Conv2d):
    """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions
    NOTE: This does not currently work with torch.jit.script
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
            dilation, groups, bias)
        self.pad = None
        self.pad_input_size = 0, 0

    def forward(self, input_0):
        # call() expects [input, weight, bias]; the padded conv result is
        # the first element of the returned tuple.
        result = call([input_0, self.weight, self.bias])
        return result[0]
|
Adlik/zen_nas
|
Conv2dSameExport
| false
| 16,901
|
[
"Apache-2.0"
] | 7
|
d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
https://github.com/Adlik/zen_nas/tree/d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57
|
BCEBlurWithLogitsLoss
|
import torch
import torch.nn as nn
class BCEBlurWithLogitsLoss(nn.Module):
    """BCE-with-logits loss that down-weights elements whose sigmoid output
    greatly exceeds the target (reduces the effect of missing labels)."""

    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')
        self.alpha = alpha

    def forward(self, pred, true):
        raw = self.loss_fcn(pred, true)
        prob = torch.sigmoid(pred)
        dx = prob - true
        # Factor -> 0 as dx -> 1 (confident false positive), ~1 otherwise;
        # the 0.0001 guards against division by zero when alpha == 0.
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 0.0001))
        weighted = raw * alpha_factor
        return weighted.mean()
def get_inputs():
    """Sample forward-pass inputs used by the benchmark harness."""
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor args/kwargs used by the benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Single-program reduction over all 256 elements: numerically-stable
# BCE-with-logits ((1-t)*x - (min(0,x) - log1p(exp(-|x|)))), multiplied by
# the "blur" factor 1 - exp((sigmoid(x) - t - 1) / (alpha + 1e-4))
# (19.96... == 1 / 0.0501 for alpha=0.05), then averaged.
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_exp_mean_mul_rsub_sigmoid_sub_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    # tmp12: elementwise BCE-with-logits loss.
    tmp12 = tmp4 - tmp11
    tmp13 = tl.sigmoid(tmp3)
    tmp14 = tmp13 - tmp0
    tmp15 = tmp14 - tmp1
    tmp16 = 19.96007984031936
    tmp17 = tmp15 * tmp16
    tmp18 = tl_math.exp(tmp17)
    tmp19 = tmp1 - tmp18
    tmp20 = tmp12 * tmp19
    tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
    tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
    tmp24 = 256.0
    tmp25 = tmp23 / tmp24
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None)
def call(args):
    """Inductor-generated forward for BCEBlurWithLogitsLoss (alpha=0.05).

    args: [target, logits], each (4, 4, 4, 4). Runs the single fused
    reduction kernel and returns the scalar mean loss.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_div_exp_mean_mul_rsub_sigmoid_sub_0[
            grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class BCEBlurWithLogitsLossNew(nn.Module):
    """Triton-backed BCEBlurWithLogitsLoss; forwards through the compiled
    ``call`` graph (kernel constants assume alpha=0.05)."""

    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLossNew, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')
        self.alpha = alpha

    def forward(self, input_0, input_1):
        # call() expects [target-side arg, logits-side arg] in this order.
        result = call([input_0, input_1])
        return result[0]
|
Aditya239233/MDP
|
BCEBlurWithLogitsLoss
| false
| 16,902
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
Lookahead
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Lookahead(nn.Module):
    """Lookahead layer: a depthwise 1-D convolution over `context` frames,
    right-padded so each step can see `context - 1` future frames."""

    def __init__(self, n_features, context):
        super(Lookahead, self).__init__()
        assert context > 0
        self.context = context
        self.n_features = n_features
        # Pad only on the right (future) side of the time axis.
        self.pad = 0, self.context - 1
        self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size
            =self.context, stride=1, groups=self.n_features, padding=0,
            bias=None)

    def forward(self, x):
        # (T, N, H) -> (N, H, T): channels-last time layout for Conv1d.
        x = x.transpose(0, 1).transpose(1, 2)
        x = F.pad(x, pad=self.pad, value=0)
        x = self.conv(x)
        # Restore the original (T, N, H) layout.
        x = x.transpose(1, 2).transpose(0, 1).contiguous()
        return x

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'n_features=' + str(self.
            n_features) + ', context=' + str(self.context) + ')'
def get_inputs():
    # One random (seq=4, batch=4, features=4) sample input.
    return [torch.rand(4, 4, 4)]


def get_init_inputs():
    # Constructor spec: no positional args; kwargs for Lookahead.
    return [[], {'n_features': 4, 'context': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Fused F.pad for Lookahead: copies the (stride-16) input into a width-7
# output row (x1 + 7 * y0); positions x1 >= 4 are the zero pad region.
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 7
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = x1
    tmp1 = tl.full([1, 1], 4, tl.int64)
    # Only the first 4 of the 7 output columns come from the input.
    tmp2 = tmp0 < tmp1
    # Masked load: out-of-range (pad) positions read as 0.0.
    tmp3 = tl.load(in_ptr0 + (y0 + 16 * x1), tmp2 & xmask & ymask,
        eviction_policy='evict_last', other=0.0)
    tl.store(out_ptr0 + (x1 + 7 * y0), tmp3, xmask & ymask)
# Transposing copy: reads with stride 4 on x and writes with stride 16 on y,
# materializing the conv output in the layout the wrapper returns.
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
def call(args):
    """Compiled Lookahead forward: pad -> grouped conv -> transposed clone.

    ``args`` is (input, conv_weight). Returns (output, conv_weight,
    padded_input); the trailing tensors are the values Inductor keeps
    around — presumably for the backward pass (not shown here).
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Padded activations: time axis grows from 4 to 7 (right pad of 3).
        buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(16, 7)](primals_1, buf0, 16,
            7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_1
        # Depthwise (groups=4) 1-D convolution via cuDNN/extern kernel.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=4, bias=None)
        assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_1[grid(4, 16)](buf1, buf2, 4, 16, XBLOCK=16,
            YBLOCK=4, num_warps=1, num_stages=1)
        del buf1
    return buf2, primals_2, buf0
class LookaheadNew(nn.Module):
    """Lookahead convolution whose forward dispatches to the compiled
    Triton/Inductor graph instead of eager pad + conv + transpose."""

    def __init__(self, n_features, context):
        super(LookaheadNew, self).__init__()
        assert context > 0
        self.context = context
        self.n_features = n_features
        self.pad = 0, self.context - 1
        self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size
            =self.context, stride=1, groups=self.n_features, padding=0,
            bias=None)

    def __repr__(self):
        return (self.__class__.__name__ + '(' + 'n_features=' + str(self.
            n_features) + ', context=' + str(self.context) + ')')

    def forward(self, input_0):
        # Positional order expected by the compiled graph: input, weight.
        return call([input_0, self.conv.weight])[0]
|
Anwarvic/RasaChatbot-with-ASR-and-TTS
|
Lookahead
| false
| 16,903
|
[
"MIT"
] | 7
|
57009f55d1ac8e4b347e81d9b8e33a08b4fd5618
|
https://github.com/Anwarvic/RasaChatbot-with-ASR-and-TTS/tree/57009f55d1ac8e4b347e81d9b8e33a08b4fd5618
|
Sum
|
import torch
import torch.nn as nn
class Sum(nn.Module):
    """Sum of ``n`` inputs indexed along dim 0, optionally re-weighted.

    With ``weight=True`` each of the n-1 trailing inputs is scaled by a
    learned factor ``2 * sigmoid(w_i)`` before being added to the first.
    """

    def __init__(self, n, weight=False):
        super().__init__()
        self.weight = weight
        self.iter = range(n - 1)
        if weight:
            # Learned logits, initialized to -i/2 for input i.
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2,
                requires_grad=True)

    def forward(self, x):
        out = x[0]
        if self.weight:
            scale = torch.sigmoid(self.w) * 2
            for idx in self.iter:
                out = out + x[idx + 1] * scale[idx]
        else:
            for idx in self.iter:
                out = out + x[idx + 1]
        return out
def get_inputs():
    # One random 4-D tensor; Sum(4) consumes its first dim as the inputs.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor spec: no positional args; n=4, default weight=False.
    return [[], {'n': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Sum(n=4, weight=False) fused into one pass: element-wise total of the four
# dim-0 slices (64 elements apart) of a contiguous (4, 4, 4, 4) input.
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # x[0], x[1], x[2], x[3]: offsets 0 / 64 / 128 / 192 (dim-0 stride).
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
    tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
    tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
    """Compiled Sum(n=4) forward: reduce a (4, 4, 4, 4) CUDA tensor over
    dim 0 into a new (4, 4, 4) buffer with one fused kernel launch."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,
class SumNew(nn.Module):
    """Sum of n inputs along dim 0; forward runs the fused Triton kernel
    (compiled for the n=4, weight=False configuration)."""

    def __init__(self, n, weight=False):
        super().__init__()
        self.weight = weight
        self.iter = range(n - 1)
        if weight:
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2,
                requires_grad=True)

    def forward(self, input_0):
        # Compiled wrapper returns a 1-tuple.
        return call([input_0])[0]
|
Aditya239233/MDP
|
Sum
| false
| 16,904
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
TTKernel
|
import torch
from torch import nn
import torch.nn.functional as F
class TTKernel(nn.Module):
    """One tensor-train core: a bias-free bilinear map r_i x m -> r_j,
    clamped at zero with ReLU."""

    def __init__(self, r_i, m, r_j):
        super(TTKernel, self).__init__()
        self.fc1 = nn.Bilinear(r_i, m, r_j, bias=False)

    def forward(self, input_tensor_1, input_tensor_2):
        # Bilinear contraction of the two inputs, then ReLU.
        return F.relu(self.fc1(input_tensor_1, input_tensor_2))
def get_inputs():
    # Two random 4-D tensors for the bilinear pair.
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor spec: no positional args; TT ranks and mode size.
    return [[], {'r_i': 4, 'm': 4, 'r_j': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# In-place ReLU on the trilinear output; also writes the `result <= 0`
# boolean mask consumed by ReLU's backward (threshold_backward).
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    # ReLU as max(0, x).
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
    """Compiled TTKernel forward.

    aten._trilinear performs the bilinear contraction (weight, two inputs
    flattened to (64, 4)); a fused kernel then applies ReLU in place and
    records the <= 0 mask. Returns (output, flat inputs, mask) — the extras
    are presumably saved for autograd.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
            primals_3, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor(
            primals_2, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
        del primals_1
        buf1 = buf0
        del buf0
        # View the flat result back as the original 4-D activation shape.
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, buf3,
            256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3
class TTKernelNew(nn.Module):
    """Tensor-train core (bilinear + ReLU) backed by the compiled graph."""

    def __init__(self, r_i, m, r_j):
        super(TTKernelNew, self).__init__()
        self.fc1 = nn.Bilinear(r_i, m, r_j, bias=False)

    def forward(self, input_0, input_1):
        # Positional order expected by `call`: weight, then the two inputs.
        return call([self.fc1.weight, input_0, input_1])[0]
|
AndresOtero/TensorDecompositionMachineLearning
|
TTKernel
| false
| 16,905
|
[
"MIT"
] | 3
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
https://github.com/AndresOtero/TensorDecompositionMachineLearning/tree/455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
Expand
|
import torch
import torch.nn as nn
class Expand(nn.Module):
    """Rearrange (b, c, h, w) -> (b, c/s^2, h*s, w*s) by moving channel
    blocks into the spatial dims (s = gain); requires c % s**2 == 0."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        s = self.gain
        b, c, h, w = x.size()
        # Split channels into (s, s, c/s^2), interleave into h and w.
        out = x.view(b, s, s, c // s ** 2, h, w)
        out = out.permute(0, 3, 4, 1, 5, 2).contiguous()
        return out.view(b, c // s ** 2, h * s, w * s)
def get_inputs():
    # One random (b=4, c=4, h=4, w=4) tensor; c is divisible by gain**2.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor spec: defaults only (gain=2).
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Gather kernel realizing Expand's 6-D permute: decodes the permuted index
# (y0..y3, x4) back into the contiguous input offset and writes the copy
# contiguously so the wrapper can reinterpret it as (4, 1, 8, 8).
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 128
    xnumel = 2
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x4 = xindex
    # Mixed-radix decomposition of the output index into source coordinates.
    y0 = yindex % 4
    y1 = yindex // 4 % 2
    y2 = yindex // 8 % 4
    y3 = yindex // 32
    y5 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * y2 + 16 * x4 + 32 * y1 + 64 * y3),
        xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x4 + 2 * y5), tmp0, xmask & ymask)
def call(args):
    """Compiled Expand forward: one gather/clone kernel produces the
    permuted copy, then the buffer is reinterpreted as (4, 1, 8, 8)."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Contiguous 6-D scratch matching the permuted view's shape.
        buf0 = empty_strided_cuda((4, 1, 4, 2, 4, 2), (64, 64, 16, 8, 2, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(128, 2)](arg0_1, buf0, 128, 2, XBLOCK
            =2, YBLOCK=64, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 1, 8, 8), (64, 64, 8, 1), 0),
class ExpandNew(nn.Module):
    """Expand (channels -> spatial) backed by the compiled Triton graph."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, input_0):
        # Compiled wrapper returns a 1-tuple.
        return call([input_0])[0]
|
Aditya239233/MDP
|
Expand
| false
| 16,906
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
Contract
|
import torch
import torch.nn as nn
class Contract(nn.Module):
    """Rearrange (b, c, h, w) -> (b, c*s^2, h/s, w/s) by folding s x s
    spatial patches into channels (s = gain); requires h, w % s == 0."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        s = self.gain
        b, c, h, w = x.size()
        # Split each spatial dim into (coarse, s), then move the two
        # fine-grained factors ahead of the channel dim.
        out = x.view(b, c, h // s, s, w // s, s)
        out = out.permute(0, 3, 5, 1, 2, 4).contiguous()
        return out.view(b, c * s * s, h // s, w // s)
def get_inputs():
    # One random (b=4, c=4, h=4, w=4) tensor; h and w divisible by gain.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor spec: defaults only (gain=2).
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Gather kernel realizing Contract's 6-D permute: decodes each permuted
# output index into the contiguous source offset and writes a contiguous
# copy the wrapper reinterprets as (4, 16, 2, 2).
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    # Mixed-radix decomposition into the source view's coordinates.
    x3 = xindex % 2
    x4 = xindex // 2
    y0 = yindex % 2
    y1 = yindex // 2 % 2
    y2 = yindex // 4
    x6 = xindex
    y5 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 2 * x3 + 4 * y1 + 8 * x4 + 64 * y2),
        xmask & ymask)
    tl.store(out_ptr0 + (x6 + 16 * y5), tmp0, xmask & ymask)
def call(args):
    """Compiled Contract forward: one gather/clone kernel builds the
    permuted copy, then the buffer is reinterpreted as (4, 16, 2, 2)."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Contiguous 6-D scratch matching the permuted view's shape.
        buf0 = empty_strided_cuda((4, 2, 2, 4, 2, 2), (64, 32, 16, 4, 2, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK
            =16, YBLOCK=16, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0),
class ContractNew(nn.Module):
    """Contract (spatial -> channels) backed by the compiled Triton graph."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, input_0):
        # Compiled wrapper returns a 1-tuple.
        return call([input_0])[0]
|
Aditya239233/MDP
|
Contract
| false
| 16,907
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
GlobalAvgPool2d
|
import torch
import torch.nn as nn
class GlobalAvgPool2d(nn.Module):
    """Global average pooling: (b, c, *spatial) -> (b, c)."""

    def __init__(self):
        """Global average pooling over the input's spatial dimensions"""
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        # Fold all spatial positions into one axis, then average it away.
        flat = inputs.view(inputs.size(0), inputs.size(1), -1)
        return flat.mean(dim=2)
def get_inputs():
    # One random (b=4, c=4, h=4, w=4) activation tensor.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor spec: no args.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Per-(batch, channel) spatial mean: each xindex owns one reduction row of
# 16 spatial values, summed with tl.sum and divided by the element count.
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    # Zero out masked rows before the reduction.
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
    """Compiled GlobalAvgPool2d forward: reduce a (4, 4, 4, 4) CUDA tensor
    over its 16 spatial positions into a (4, 4) buffer."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del arg0_1
    return buf1,
class GlobalAvgPool2dNew(nn.Module):
    """Global average pooling backed by the compiled Triton reduction."""

    def __init__(self):
        """Global average pooling over the input's spatial dimensions"""
        super(GlobalAvgPool2dNew, self).__init__()

    def forward(self, input_0):
        # Compiled wrapper returns a 1-tuple.
        return call([input_0])[0]
|
Anikily/CDinkNet
|
GlobalAvgPool2d
| false
| 16,908
|
[
"MIT"
] | 4
|
490736855475a51bb2984412e88ac7d50d817a3c
|
https://github.com/Anikily/CDinkNet/tree/490736855475a51bb2984412e88ac7d50d817a3c
|
SiLU
|
import torch
import torch.nn as nn
class SiLU(nn.Module):
    """SiLU/Swish activation, x * sigmoid(x), as an export-friendly module."""

    @staticmethod
    def forward(x):
        return torch.sigmoid(x) * x
def get_inputs():
    # One random 4-D tensor; SiLU is purely element-wise.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor spec: no args.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Element-wise SiLU/Swish: out = x * sigmoid(x) over 256 contiguous values.
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
    """Compiled SiLU forward: one element-wise kernel over a (4, 4, 4, 4)
    CUDA tensor, writing into a fresh output buffer."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
            =256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class SiLUNew(nn.Module):
    """SiLU activation backed by the fused Triton kernel."""

    def forward(self, input_0):
        # Compiled wrapper returns a 1-tuple.
        return call([input_0])[0]
|
Aditya239233/MDP
|
SiLU
| false
| 16,909
|
[
"MIT"
] | 4
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
https://github.com/Aditya239233/MDP/tree/87491e1d67e547c11f4bdd5d784d120473429eae
|
QAConvSDSLayer
|
import torch
import torch.nn as nn
class QAConvSDSLayer(nn.Module):
    """Conv SDS layer for qa output"""

    def __init__(self, input_size: 'int', hidden_dim: 'int'):
        """
        Args:
            input_size (int): max sequence lengths
            hidden_dim (int): backbones's hidden dimension
        """
        super().__init__()
        # Expand channels x2 with a length-preserving conv, then project back.
        self.conv1 = nn.Conv1d(in_channels=input_size, out_channels=
            input_size * 2, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(in_channels=input_size * 2, out_channels=
            input_size, kernel_size=1)
        self.layer_norm = nn.LayerNorm(hidden_dim)

    def forward(self, x: 'torch.Tensor') ->torch.Tensor:
        # Residual ReLU bottleneck followed by layer normalization.
        residual = torch.relu(self.conv2(self.conv1(x)))
        return self.layer_norm(x + residual)
def get_inputs():
    # One (seq=4, hidden=4) activation matrix.
    return [torch.rand(4, 4)]


def get_init_inputs():
    # Constructor spec: no positional args; sequence and hidden sizes.
    return [[], {'input_size': 4, 'hidden_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Adds the conv1 bias in place after the extern convolution; x1 selects the
# output channel (one bias value per 4 contiguous positions).
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
# Adds the conv2 bias in place; same pattern as the conv1 kernel but over
# the projected 16-element output.
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
# Layer-norm statistics stage fused with the residual: for each of the 4
# rows it computes t_i = x_i + relu(conv_i) over the 4 columns, then emits
# the row mean (out_ptr0) and biased variance (out_ptr1).
@triton.jit
def triton_poi_fused_add_native_layer_norm_relu_2(in_ptr0, in_ptr1,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Four column loads per row from both the input (in_ptr0) and the
    # conv output (in_ptr1); the row is fully unrolled.
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp2 = tl.full([1], 0, tl.int32)
    tmp3 = triton_helpers.maximum(tmp2, tmp1)
    tmp4 = tmp0 + tmp3
    tmp7 = triton_helpers.maximum(tmp2, tmp6)
    tmp8 = tmp5 + tmp7
    tmp9 = tmp4 + tmp8
    tmp12 = triton_helpers.maximum(tmp2, tmp11)
    tmp13 = tmp10 + tmp12
    tmp14 = tmp9 + tmp13
    tmp17 = triton_helpers.maximum(tmp2, tmp16)
    tmp18 = tmp15 + tmp17
    tmp19 = tmp14 + tmp18
    tmp20 = 4.0
    # Row mean over the 4 normalized-axis elements.
    tmp21 = tmp19 / tmp20
    tmp22 = tmp4 - tmp21
    tmp23 = tmp22 * tmp22
    tmp24 = tmp8 - tmp21
    tmp25 = tmp24 * tmp24
    tmp26 = tmp23 + tmp25
    tmp27 = tmp13 - tmp21
    tmp28 = tmp27 * tmp27
    tmp29 = tmp26 + tmp28
    tmp30 = tmp18 - tmp21
    tmp31 = tmp30 * tmp30
    tmp32 = tmp29 + tmp31
    # Biased variance (divide by N, matching native_layer_norm).
    tmp33 = tmp32 / tmp20
    tl.store(out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr1 + x0, tmp33, xmask)
# Layer-norm apply stage: recomputes x + relu(conv_out), normalizes with
# the per-row mean (in_ptr2) and variance (in_ptr3) via rsqrt(var + 1e-05),
# then applies the elementwise affine weight (in_ptr4) and bias (in_ptr5).
@triton.jit
def triton_poi_fused_add_native_layer_norm_relu_3(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.full([1], 0, tl.int32)
    tmp3 = triton_helpers.maximum(tmp2, tmp1)
    tmp4 = tmp0 + tmp3
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
    """Compiled QAConvSDSLayer forward.

    ``args`` order: conv1 weight/bias, input, conv2 weight/bias, layer-norm
    weight/bias. Pipeline: extern conv1 + bias -> extern conv2 + bias ->
    fused residual-ReLU + layer-norm (stats kernel, then apply kernel).
    Returns the normalized output plus tensors Inductor retains —
    presumably for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (8, 4, 3), (12, 3, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 on the input viewed as a batch-1 (1, 4, 4) sequence.
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
            4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf0, (1, 8, 4), (32, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(32)](buf1, primals_2, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_2
        # conv2 (1x1) projecting 8 channels back down to 4.
        buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 8, 4
            ), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(16)](buf3, primals_5, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        # Per-row layer-norm statistics (mean in buf4, variance in buf5).
        buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_add_native_layer_norm_relu_2[grid(4)](primals_3,
            buf3, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_relu_3[grid(16)](primals_3,
            buf3, buf4, buf5, primals_6, primals_7, buf6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf4
        del buf5
        del primals_7
    return buf6, primals_1, primals_3, primals_4, primals_6, buf1, buf3
class QAConvSDSLayerNew(nn.Module):
    """Conv SDS layer for qa output"""

    def __init__(self, input_size: 'int', hidden_dim: 'int'):
        """
        Args:
            input_size (int): max sequence lengths
            hidden_dim (int): backbones's hidden dimension
        """
        super().__init__()
        self.conv1 = nn.Conv1d(in_channels=input_size, out_channels=
            input_size * 2, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(in_channels=input_size * 2, out_channels=
            input_size, kernel_size=1)
        self.layer_norm = nn.LayerNorm(hidden_dim)

    def forward(self, input_0):
        # Parameters in the positional order the compiled graph expects:
        # conv1 w/b, input, conv2 w/b, layer-norm w/b.
        params = [self.conv1.weight, self.conv1.bias, input_0,
            self.conv2.weight, self.conv2.bias, self.layer_norm.weight,
            self.layer_norm.bias]
        return call(params)[0]
|
Amber-Chaeeunk/Open-Domain-Question-Answering
|
QAConvSDSLayer
| false
| 16,910
|
[
"MIT"
] | 5
|
725e369a4409c54bf11bcfb9db53865d8fc1f935
|
https://github.com/Amber-Chaeeunk/Open-Domain-Question-Answering/tree/725e369a4409c54bf11bcfb9db53865d8fc1f935
|
FeatureMap
|
import torch
from torch import nn
import torch.nn.functional as F
class FeatureMap(nn.Module):
    """Linear + ReLU feature transform over the flattened leading dims."""

    def __init__(self, n, m, amount_of_division, batch_size):
        super(FeatureMap, self).__init__()
        # n/m are the in/out feature widths; the remaining attributes are
        # kept as configuration (not used in forward here).
        self.m = m
        self.n = n
        self.amount_of_division = amount_of_division
        self.batch_size = batch_size
        self.fc1 = nn.Linear(self.n, self.m)

    def forward(self, tensor):
        # Collapse all leading dims so fc1 sees a (rows, n) matrix.
        flat = tensor.contiguous().view(-1, tensor.size()[-1])
        return F.relu(self.fc1(flat))
def get_inputs():
    # One random 4-D tensor; forward flattens everything but the last dim.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor spec: no positional args; all four config kwargs.
    return [[], {'n': 4, 'm': 4, 'amount_of_division': 4, 'batch_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused bias-add + ReLU applied in place to the matmul output; out_ptr0
# receives the `result <= 0` mask used by ReLU's backward.
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    # x0 selects the output feature for the per-column bias.
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Compiled FeatureMap forward.

    ``args`` order: input, fc1 weight, fc1 bias. The input is flattened to
    (64, 4), multiplied by the transposed weight via extern mm, then a fused
    kernel adds the bias, applies ReLU in place and records the <= 0 mask.
    Returns (activations, flat input, mask).
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # x @ W.T: the weight is reinterpreted with swapped strides.
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2
class FeatureMapNew(nn.Module):
    """Linear + ReLU feature transform backed by the compiled graph."""

    def __init__(self, n, m, amount_of_division, batch_size):
        super(FeatureMapNew, self).__init__()
        self.m = m
        self.n = n
        self.amount_of_division = amount_of_division
        self.batch_size = batch_size
        self.fc1 = nn.Linear(self.n, self.m)

    def forward(self, input_0):
        # Positional order expected by `call`: input, weight, bias.
        return call([input_0, self.fc1.weight, self.fc1.bias])[0]
|
AndresOtero/TensorDecompositionMachineLearning
|
FeatureMap
| false
| 16,911
|
[
"MIT"
] | 3
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
https://github.com/AndresOtero/TensorDecompositionMachineLearning/tree/455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.