entry_point
stringlengths 1
65
| original_triton_python_code
stringlengths 208
619k
| optimised_triton_code
stringlengths 1.15k
275k
| repo_name
stringlengths 7
115
| module_name
stringlengths 1
65
| synthetic
bool 1
class | uuid
int64 0
18.5k
| licenses
listlengths 1
6
| stars
int64 0
19.8k
| sha
stringlengths 40
40
| repo_link
stringlengths 72
180
|
|---|---|---|---|---|---|---|---|---|---|---|
CausalConv2d
|
import torch
from torch import nn
import torch.utils.data
class WNConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
kernel_size, stride=stride, padding=padding, bias=bias))
self.out_channel = out_channel
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
self.kernel_size = kernel_size
self.activation = activation
def forward(self, input):
out = self.conv(input)
if self.activation is not None:
out = self.activation(out)
return out
class CausalConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding='downright', activation=None):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = [kernel_size] * 2
self.kernel_size = kernel_size
if padding == 'downright':
pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0]
elif padding == 'down' or padding == 'causal':
pad = kernel_size[1] // 2
pad = [pad, pad, kernel_size[0] - 1, 0]
self.causal = 0
if padding == 'causal':
self.causal = kernel_size[1] // 2
self.pad = nn.ZeroPad2d(pad)
self.conv = WNConv2d(in_channel, out_channel, kernel_size, stride=
stride, padding=0, activation=activation)
def forward(self, input):
out = self.pad(input)
if self.causal > 0:
self.conv.conv.weight_v.data[:, :, -1, self.causal:].zero_()
out = self.conv(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -3 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -3 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-15 + x0 + 4 * x1 + 16 * x2), tmp5 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3,
primals_2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
return buf5, buf3, primals_2, primals_3, buf0, buf2, buf3
class WNConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
kernel_size, stride=stride, padding=padding, bias=bias))
self.out_channel = out_channel
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
self.kernel_size = kernel_size
self.activation = activation
def forward(self, input):
out = self.conv(input)
if self.activation is not None:
out = self.activation(out)
return out
class CausalConv2dNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding='downright', activation=None):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = [kernel_size] * 2
self.kernel_size = kernel_size
if padding == 'downright':
pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0]
elif padding == 'down' or padding == 'causal':
pad = kernel_size[1] // 2
pad = [pad, pad, kernel_size[0] - 1, 0]
self.causal = 0
if padding == 'causal':
self.causal = kernel_size[1] // 2
self.pad = nn.ZeroPad2d(pad)
self.conv = WNConv2d(in_channel, out_channel, kernel_size, stride=
stride, padding=0, activation=activation)
def forward(self, input_0):
primals_4 = self.conv.conv.bias
primals_2 = self.conv.conv.weight_g
primals_1 = self.conv.conv.weight_v
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Shivanshu-Gupta/KaoKore-VQ-VAE2
|
CausalConv2d
| false
| 1,075
|
[
"MIT"
] | 0
|
38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
|
https://github.com/Shivanshu-Gupta/KaoKore-VQ-VAE2/tree/38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
|
FastGRNNCell
|
import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
return torch.relu(A, 0.0)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class FastGRNNCell(RNNCell):
"""
FastGRNN Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix (creates two matrices if not None)
uRank = rank of U matrix (creates two matrices if not None)
wSparsity = intended sparsity of W matrix(ces)
uSparsity = intended sparsity of U matrix(ces)
Warning:
The Cell will not automatically sparsify.
The user must invoke .sparsify to hard threshold.
zetaInit = init for zeta, the scale param
nuInit = init for nu, the translation param
FastGRNN architecture and compression techniques are found in
FastGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(Wx_t + Uh_{t-1} + B_g)
h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^
W and U can further parameterised into low rank version by
W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'):
super(FastGRNNCell, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank,
wSparsity, uSparsity)
self._zetaInit = zetaInit
self._nuInit = nuInit
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
else:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])
)
else:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1]))
self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1]))
@property
def name(self):
return self._name
@property
def cellType(self):
return 'FastGRNN'
def forward(self, input, state):
if self._wRank is None:
wComp = torch.matmul(input, self.W)
else:
wComp = torch.matmul(torch.matmul(input, self.W1), self.W2)
if self._uRank is None:
uComp = torch.matmul(state, self.U)
else:
uComp = torch.matmul(torch.matmul(state, self.U1), self.U2)
pre_comp = wComp + uComp
z = gen_nonlinearity(pre_comp + self.bias_gate, self._gate_nonlinearity
)
c = gen_nonlinearity(pre_comp + self.bias_update, self.
_update_nonlinearity)
new_h = z * state + (torch.sigmoid(self.zeta) * (1.0 - z) + torch.
sigmoid(self.nu)) * c
return new_h
def getVars(self):
Vars = []
if self._num_W_matrices == 1:
Vars.append(self.W)
else:
Vars.extend([self.W1, self.W2])
if self._num_U_matrices == 1:
Vars.append(self.U)
else:
Vars.extend([self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
Vars.extend([self.zeta, self.nu])
return Vars
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp14 = tl.load(in_ptr4 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp10 = tl.sigmoid(tmp9)
tmp11 = 1.0
tmp12 = tmp11 - tmp5
tmp13 = tmp10 * tmp12
tmp16 = tl.sigmoid(tmp15)
tmp17 = tmp13 + tmp16
tmp19 = tmp2 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp21 = tmp17 * tmp20
tmp22 = tmp7 + tmp21
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp22, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, 1), (1, 1))
assert_size_stride(primals_8, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf2, buf1,
primals_5, primals_4, primals_7, primals_8, primals_6, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
return (buf3, primals_4, primals_5, primals_6, primals_7, primals_8,
buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0))
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
return torch.relu(A, 0.0)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class FastGRNNCellNew(RNNCell):
"""
FastGRNN Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix (creates two matrices if not None)
uRank = rank of U matrix (creates two matrices if not None)
wSparsity = intended sparsity of W matrix(ces)
uSparsity = intended sparsity of U matrix(ces)
Warning:
The Cell will not automatically sparsify.
The user must invoke .sparsify to hard threshold.
zetaInit = init for zeta, the scale param
nuInit = init for nu, the translation param
FastGRNN architecture and compression techniques are found in
FastGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(Wx_t + Uh_{t-1} + B_g)
h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^
W and U can further parameterised into low rank version by
W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'):
super(FastGRNNCellNew, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank,
wSparsity, uSparsity)
self._zetaInit = zetaInit
self._nuInit = nuInit
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
else:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])
)
else:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1]))
self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1]))
@property
def name(self):
return self._name
@property
def cellType(self):
return 'FastGRNN'
def getVars(self):
Vars = []
if self._num_W_matrices == 1:
Vars.append(self.W)
else:
Vars.extend([self.W1, self.W2])
if self._num_U_matrices == 1:
Vars.append(self.U)
else:
Vars.extend([self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
Vars.extend([self.zeta, self.nu])
return Vars
def forward(self, input_0, input_1):
primals_1 = self.W
primals_3 = self.U
primals_5 = self.bias_gate
primals_6 = self.bias_update
primals_7 = self.zeta
primals_8 = self.nu
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
ShishirPatil/EdgeML-1
|
FastGRNNCell
| false
| 1,076
|
[
"MIT"
] | 0
|
cbba9f8b989e545788427c004eb8450e7e4c1a21
|
https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21
|
ResidualConvUnit
|
import torch
import torch.nn as nn
class ResidualConvUnit(nn.Module):
"""Residual convolution module.
"""
def __init__(self, features):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1,
padding=1, bias=True)
self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1,
padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.relu(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
return out + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_2[grid(256)](buf4, buf0, primals_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf4, primals_2, primals_4, buf0, buf2
class ResidualConvUnitNew(nn.Module):
"""Residual convolution module.
"""
def __init__(self, features):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1,
padding=1, bias=True)
self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1,
padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
ShiraLightricks/3d-photo-inpainting
|
ResidualConvUnit
| false
| 1,077
|
[
"MIT"
] | 0
|
c42ac41576690b765e50f5281ddbfb58439ff36d
|
https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d
|
LayerNorm
|
import torch
import numpy as np
class BaseModule(torch.nn.Module):
def __init__(self):
super(BaseModule, self).__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.named_parameters():
if param.requires_grad:
num_params += np.prod(param.detach().cpu().numpy().shape)
return num_params
def relocate_input(self, x: 'list'):
"""
Relocates provided tensors to the same device set for the module.
"""
device = next(self.parameters()).device
for i in range(len(x)):
if isinstance(x[i], torch.Tensor) and x[i].device != device:
x[i] = x[i]
return x
class LayerNorm(BaseModule):
def __init__(self, channels, eps=0.0001):
super(LayerNorm, self).__init__()
self.channels = channels
self.eps = eps
self.gamma = torch.nn.Parameter(torch.ones(channels))
self.beta = torch.nn.Parameter(torch.zeros(channels))
def forward(self, x):
n_dims = len(x.shape)
mean = torch.mean(x, 1, keepdim=True)
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
x = (x - mean) * torch.rsqrt(variance + self.eps)
shape = [1, -1] + [1] * (n_dims - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 0.0001
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0,
primals_2, primals_3, buf1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class BaseModule(torch.nn.Module):
def __init__(self):
super(BaseModule, self).__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.named_parameters():
if param.requires_grad:
num_params += np.prod(param.detach().cpu().numpy().shape)
return num_params
def relocate_input(self, x: 'list'):
"""
Relocates provided tensors to the same device set for the module.
"""
device = next(self.parameters()).device
for i in range(len(x)):
if isinstance(x[i], torch.Tensor) and x[i].device != device:
x[i] = x[i]
return x
class LayerNormNew(BaseModule):
def __init__(self, channels, eps=0.0001):
super(LayerNormNew, self).__init__()
self.channels = channels
self.eps = eps
self.gamma = torch.nn.Parameter(torch.ones(channels))
self.beta = torch.nn.Parameter(torch.zeros(channels))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Sobsz/uberduck-ml-dev
|
LayerNorm
| false
| 1,078
|
[
"Apache-2.0"
] | 0
|
f099238f6f2e3f600d72d89dea3c883c59d91387
|
https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387
|
Loss
|
import torch
import torch as t
import torch.nn as nn
def indicator(K):
"""
@K: number of users
"""
return t.eye(5 * K)
class Loss(nn.Module):
def __init__(self, K, Nt, Vartheta):
super(Loss, self).__init__()
self.K = K
self.Nt = Nt
self.Delta = indicator(self.K)
self.alpha = 1 / self.K
self.Vartheta = Vartheta
self.batchsize = 10
def forward(self, x, ind1, ind2):
"""
@x: output of the last layer, its dimmension is (batchsize, 2*K*K+3*K)
"""
loss = self.alpha * t.mean(t.matmul(t.log(1 + x), ind1) - t.matmul(
self.Vartheta * x, ind2))
return -loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'K': 4, 'Nt': 4, 'Vartheta': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch as t
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_log_mul_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = 4.0
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 256.0
tmp7 = tmp5 / tmp6
tmp8 = 0.25
tmp9 = tmp7 * tmp8
tmp10 = -tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_log_mul_0[grid(256)](arg0_1, buf0, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), out=buf1
)
del arg1_1
buf3 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3
)
del arg2_1
del buf2
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_mean_mul_neg_sub_1[grid(1)](buf5, buf1, buf3, 1,
256, num_warps=2, num_stages=1)
del buf1
del buf3
return buf5,
def indicator(K):
"""
@K: number of users
"""
return t.eye(5 * K)
class LossNew(nn.Module):
def __init__(self, K, Nt, Vartheta):
super(LossNew, self).__init__()
self.K = K
self.Nt = Nt
self.Delta = indicator(self.K)
self.alpha = 1 / self.K
self.Vartheta = Vartheta
self.batchsize = 10
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
SoulVen/USRMNet-HWGCN
|
Loss
| false
| 1,079
|
[
"Apache-2.0"
] | 0
|
2f99f53150335be26270bd408ce59dc51c8435cc
|
https://github.com/SoulVen/USRMNet-HWGCN/tree/2f99f53150335be26270bd408ce59dc51c8435cc
|
AttNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttNet(nn.Module):
def __init__(self, num_input_ch):
super(AttNet, self).__init__()
self.num_input_ch = num_input_ch
self.conv1 = nn.Conv2d(self.num_input_ch, 64, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(64, 16, 1, bias=True)
self.conv3 = nn.Conv2d(16, 1, 1, bias=True)
def forward(self, warp_feat, conv_feat):
concat_feat = torch.cat([warp_feat, conv_feat], dim=0)
weights = F.relu(self.conv1(concat_feat))
weights = F.relu(self.conv2(weights))
weights = F.softmax(self.conv3(weights), dim=0)
weights = torch.split(weights, 2, dim=0)
weight1 = torch.tile(weights[0], (1, self.num_input_ch, 1, 1))
weight2 = torch.tile(weights[0], (1, self.num_input_ch, 1, 1))
out_feat = weight1 * warp_feat + weight2 * conv_feat
return out_feat
def get_inputs():
return [torch.rand([2, 4, 4, 4]), torch.rand([2, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_input_ch': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-2 + x1)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_convolution_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp10 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp3 = tmp0 + tmp2
tmp5 = tmp4 + tmp2
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp7 + tmp2
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 + tmp2
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp3 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp5 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp8 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tl.store(out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused__softmax_convolution_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_repeat_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 + tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (2, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (64,), (1,))
assert_size_stride(primals_5, (16, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (1, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(4096)](buf2, primals_4,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_2[grid(1024)](buf4, primals_6,
1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
buf6 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_convolution_3[grid(16)](buf5, primals_8,
buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = buf5
del buf5
triton_poi_fused__softmax_convolution_4[grid(64)](buf8, primals_8,
buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf6
del buf7
del primals_8
buf9 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_repeat_5[grid(128)](buf8, primals_1,
primals_2, buf9, 128, XBLOCK=128, num_warps=4, num_stages=1)
return (buf9, primals_1, primals_2, primals_3, primals_5, primals_7,
buf0, buf2, buf4, buf8)
class AttNetNew(nn.Module):
def __init__(self, num_input_ch):
super(AttNetNew, self).__init__()
self.num_input_ch = num_input_ch
self.conv1 = nn.Conv2d(self.num_input_ch, 64, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(64, 16, 1, bias=True)
self.conv3 = nn.Conv2d(16, 1, 1, bias=True)
def forward(self, input_0, input_1):
primals_3 = self.conv1.weight
primals_4 = self.conv1.bias
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_7 = self.conv3.weight
primals_8 = self.conv3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
SionHu/LP-MOT
|
AttNet
| false
| 1,080
|
[
"MIT"
] | 0
|
90e6a1d51ebe1a948ac5c018a5ee560654e824f1
|
https://github.com/SionHu/LP-MOT/tree/90e6a1d51ebe1a948ac5c018a5ee560654e824f1
|
Net
|
import torch
import torch.nn as nn
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
class Net(nn.Module):
def __init__(self, nFeatures, nHidden1, nHidden2):
super(Net, self).__init__()
self.l1 = FcCat(nFeatures, nHidden1)
self.l2 = FcCat(nFeatures + nHidden1, nHidden2)
def forward(self, x):
out = self.l1(x)
out = self.l2(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nFeatures': 4, 'nHidden1': 4, 'nHidden2': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
x1 = xindex // 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf3 = reinterpret_tensor(buf5, (4, 4), (12, 1), 8)
extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf3)
buf4 = reinterpret_tensor(buf5, (4, 8), (12, 1), 0)
triton_poi_fused_cat_1[grid(32)](buf2, buf4, 32, XBLOCK=32,
num_warps=1, num_stages=1)
return buf5, primals_2, buf2, primals_3
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
class NetNew(nn.Module):
def __init__(self, nFeatures, nHidden1, nHidden2):
super(NetNew, self).__init__()
self.l1 = FcCat(nFeatures, nHidden1)
self.l2 = FcCat(nFeatures + nHidden1, nHidden2)
def forward(self, input_0):
primals_1 = self.l1.fc.weight
primals_3 = self.l2.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Sreehari-S/Tiramisu_DigestPath
|
Net
| false
| 1,081
|
[
"Apache-2.0"
] | 0
|
a884ee911bc60ce997996e0ec2e6036600ffcffa
|
https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa
|
DecoderLayer
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, key_sequence_length, size_per_head = key.size()
query = query.view(batch_size, num_head, query_sequence_length,
size_per_head)
key = key.view(batch_size, num_head, size_per_head, key_sequence_length
)
attention_score = torch.einsum('abcd, abde -> abce', query, key)
attention_score = attention_score / math.sqrt(size_per_head)
if mask is not None:
attention_score = attention_score.masked_fill(mask == 0, -
1000000000.0)
attention_score = F.softmax(attention_score, dim=-1)
result = attention_score @ value
return result, attention_score
class MultiHeadAttention(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, num_head):
super(MultiHeadAttention, self).__init__()
self.model_dim = model_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.num_head = num_head
self.Wq = nn.Linear(model_dim, key_dim)
self.Wk = nn.Linear(model_dim, key_dim)
self.Wv = nn.Linear(model_dim, value_dim)
self.attention = ScaledDotProductAttention()
self.Wo = nn.Linear(value_dim, model_dim)
def forward(self, query, key, value, mask=None):
prj_query = self.Wq(query)
prj_key = self.Wk(key)
prj_value = self.Wv(value)
multihead_query = self.multihead_split(prj_query)
multihead_key = self.multihead_split(prj_key)
multihead_value = self.multihead_split(prj_value)
attention_output, _attention_score = self.attention(multihead_query,
multihead_key, multihead_value, mask=mask)
output = self.multihead_concat(attention_output)
output = self.Wo(output)
return output
def multihead_split(self, tensor):
batch_size, sequence_length, hidden_size = tensor.size()
size_per_head = hidden_size // self.num_head
return tensor.view(batch_size, self.num_head, sequence_length,
size_per_head)
def multihead_concat(self, tensor):
batch_size, num_head, sequence_length, size_per_head = tensor.size()
hidden_size = num_head * size_per_head
return tensor.view(batch_size, sequence_length, hidden_size)
class FeedForward(nn.Module):
def __init__(self, model_dim, hidden_dim, drop_prob):
super(FeedForward, self).__init__()
self.model_dim = model_dim
self.hidden_dim = hidden_dim
self.drop_prob = drop_prob
self.linearlayer1 = nn.Linear(model_dim, hidden_dim)
self.linearlayer2 = nn.Linear(hidden_dim, model_dim)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(drop_prob)
def forward(self, tensor):
tensor = self.dropout(self.relu(self.linearlayer1(tensor)))
return self.linearlayer2(tensor)
class DecoderLayer(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head,
drop_prob):
super(DecoderLayer, self).__init__()
self.self_attention = MultiHeadAttention(model_dim, key_dim,
value_dim, num_head)
self.normalization1 = nn.LayerNorm(model_dim)
self.dropout1 = nn.Dropout(drop_prob)
self.enc_dec_attention = MultiHeadAttention(model_dim, key_dim,
value_dim, num_head)
self.normalization2 = nn.LayerNorm(model_dim)
self.dropout2 = nn.Dropout(drop_prob)
self.ffn = FeedForward(model_dim, hidden_dim, drop_prob)
self.normalization3 = nn.LayerNorm(model_dim)
self.dropout3 = nn.Dropout(drop_prob)
def forward(self, dec_tensor, enc_tensor, source_mask, target_mask):
residual = dec_tensor
tensor = self.self_attention(query=dec_tensor, key=dec_tensor,
value=dec_tensor, mask=target_mask)
tensor = self.dropout1(self.normalization1(tensor + residual))
if enc_tensor is not None:
residual = tensor
tensor = self.enc_dec_attention(query=tensor, key=enc_tensor,
value=enc_tensor, mask=source_mask)
tensor = self.dropout2(self.normalization2(tensor + residual))
residual = tensor
tensor = self.ffn(tensor)
tensor = self.dropout3(self.normalization3(tensor + residual))
return tensor
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4, 'key_dim': 4, 'value_dim': 4, 'hidden_dim':
4, 'num_head': 4, 'drop_prob': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp15 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp21 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = -1000000000.0
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp10 = tmp1 * tmp9
tmp11 = tmp10 * tmp4
tmp12 = tl.where(tmp8, tmp6, tmp11)
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp16 = tmp1 * tmp15
tmp17 = tmp16 * tmp4
tmp18 = tl.where(tmp14, tmp6, tmp17)
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp22 = tmp1 * tmp21
tmp23 = tmp22 * tmp4
tmp24 = tl.where(tmp20, tmp6, tmp23)
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tmp26 = tmp7 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp12 - tmp25
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp18 - tmp25
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp24 - tmp25
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tl.store(out_ptr0 + x2, tmp25, xmask)
tl.store(out_ptr1 + x2, tmp36, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = -1000000000.0
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_21, (4, 4), (4, 1))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4,), (1,))
assert_size_stride(primals_25, (4, 4), (4, 1))
assert_size_stride(primals_26, (4,), (1,))
assert_size_stride(primals_27, (4, 4), (4, 1))
assert_size_stride(primals_28, (4,), (1,))
assert_size_stride(primals_29, (4,), (1,))
assert_size_stride(primals_30, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_eq_0[grid(256)](primals_8, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_8
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf3, buf0,
buf1, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf3, buf0,
buf1, buf4, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf7)
buf8 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_10
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf8, primals_1,
buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(64)](buf8, primals_1,
buf9, buf10, primals_11, primals_12, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_15
buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_17, reinterpret_tensor(primals_13, (16,
4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf13)
del primals_16
del primals_17
buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_19, reinterpret_tensor(primals_13, (16,
4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf14)
del primals_18
del primals_19
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_eq_0[grid(256)](primals_20, buf15, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_20
buf16 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf15, buf12,
buf13, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf15, buf12,
buf13, buf16, buf17, buf18, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf19 = reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 1), 0)
del buf17
extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 1), 0), out=buf19)
buf20 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0)
del buf16
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0)
del buf20
triton_poi_fused_add_5[grid(64)](buf21, primals_22, buf11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_22
buf22 = buf9
del buf9
buf23 = buf10
del buf10
triton_poi_fused_native_layer_norm_6[grid(16)](buf21, buf22, buf23,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_7[grid(64)](buf21, buf22, buf23,
primals_23, primals_24, buf24, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_24
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_8[grid(64)](buf26,
primals_26, buf32, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_26
buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0)
del buf27
triton_poi_fused_add_5[grid(64)](buf28, primals_28, buf24, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_28
buf29 = buf23
del buf23
buf30 = buf22
del buf22
triton_poi_fused_native_layer_norm_6[grid(16)](buf28, buf29, buf30,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_7[grid(64)](buf28, buf29, buf30,
primals_29, primals_30, buf31, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf29
del buf30
del primals_30
return (buf31, primals_1, primals_11, primals_23, primals_29, buf0,
buf1, buf3, buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0),
buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12,
reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), buf13, buf15,
buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf21,
reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(
buf26, (16, 4), (4, 1), 0), buf28, primals_27, buf32, primals_25,
primals_21, reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0),
primals_14, primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1,
1), 0))
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, key_sequence_length, size_per_head = key.size()
query = query.view(batch_size, num_head, query_sequence_length,
size_per_head)
key = key.view(batch_size, num_head, size_per_head, key_sequence_length
)
attention_score = torch.einsum('abcd, abde -> abce', query, key)
attention_score = attention_score / math.sqrt(size_per_head)
if mask is not None:
attention_score = attention_score.masked_fill(mask == 0, -
1000000000.0)
attention_score = F.softmax(attention_score, dim=-1)
result = attention_score @ value
return result, attention_score
class MultiHeadAttention(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, num_head):
super(MultiHeadAttention, self).__init__()
self.model_dim = model_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.num_head = num_head
self.Wq = nn.Linear(model_dim, key_dim)
self.Wk = nn.Linear(model_dim, key_dim)
self.Wv = nn.Linear(model_dim, value_dim)
self.attention = ScaledDotProductAttention()
self.Wo = nn.Linear(value_dim, model_dim)
def forward(self, query, key, value, mask=None):
prj_query = self.Wq(query)
prj_key = self.Wk(key)
prj_value = self.Wv(value)
multihead_query = self.multihead_split(prj_query)
multihead_key = self.multihead_split(prj_key)
multihead_value = self.multihead_split(prj_value)
attention_output, _attention_score = self.attention(multihead_query,
multihead_key, multihead_value, mask=mask)
output = self.multihead_concat(attention_output)
output = self.Wo(output)
return output
def multihead_split(self, tensor):
batch_size, sequence_length, hidden_size = tensor.size()
size_per_head = hidden_size // self.num_head
return tensor.view(batch_size, self.num_head, sequence_length,
size_per_head)
def multihead_concat(self, tensor):
batch_size, num_head, sequence_length, size_per_head = tensor.size()
hidden_size = num_head * size_per_head
return tensor.view(batch_size, sequence_length, hidden_size)
class FeedForward(nn.Module):
def __init__(self, model_dim, hidden_dim, drop_prob):
super(FeedForward, self).__init__()
self.model_dim = model_dim
self.hidden_dim = hidden_dim
self.drop_prob = drop_prob
self.linearlayer1 = nn.Linear(model_dim, hidden_dim)
self.linearlayer2 = nn.Linear(hidden_dim, model_dim)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(drop_prob)
def forward(self, tensor):
tensor = self.dropout(self.relu(self.linearlayer1(tensor)))
return self.linearlayer2(tensor)
class DecoderLayerNew(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head,
drop_prob):
super(DecoderLayerNew, self).__init__()
self.self_attention = MultiHeadAttention(model_dim, key_dim,
value_dim, num_head)
self.normalization1 = nn.LayerNorm(model_dim)
self.dropout1 = nn.Dropout(drop_prob)
self.enc_dec_attention = MultiHeadAttention(model_dim, key_dim,
value_dim, num_head)
self.normalization2 = nn.LayerNorm(model_dim)
self.dropout2 = nn.Dropout(drop_prob)
self.ffn = FeedForward(model_dim, hidden_dim, drop_prob)
self.normalization3 = nn.LayerNorm(model_dim)
self.dropout3 = nn.Dropout(drop_prob)
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.self_attention.Wq.weight
primals_3 = self.self_attention.Wq.bias
primals_4 = self.self_attention.Wk.weight
primals_5 = self.self_attention.Wk.bias
primals_6 = self.self_attention.Wv.weight
primals_7 = self.self_attention.Wv.bias
primals_9 = self.self_attention.Wo.weight
primals_10 = self.self_attention.Wo.bias
primals_11 = self.normalization1.weight
primals_12 = self.normalization1.bias
primals_14 = self.enc_dec_attention.Wq.weight
primals_15 = self.enc_dec_attention.Wq.bias
primals_16 = self.enc_dec_attention.Wk.weight
primals_17 = self.enc_dec_attention.Wk.bias
primals_18 = self.enc_dec_attention.Wv.weight
primals_19 = self.enc_dec_attention.Wv.bias
primals_21 = self.enc_dec_attention.Wo.weight
primals_22 = self.enc_dec_attention.Wo.bias
primals_23 = self.normalization2.weight
primals_24 = self.normalization2.bias
primals_25 = self.ffn.linearlayer1.weight
primals_26 = self.ffn.linearlayer1.bias
primals_27 = self.ffn.linearlayer2.weight
primals_28 = self.ffn.linearlayer2.bias
primals_29 = self.normalization3.weight
primals_30 = self.normalization3.bias
primals_1 = input_0
primals_13 = input_1
primals_8 = input_2
primals_20 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0]
|
SeungoneKim/Transformer_implementation
|
DecoderLayer
| false
| 1,082
|
[
"Apache-2.0"
] | 0
|
a52bf552eb645fc9bfb812cc26842fc147d6c008
|
https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008
|
TransitionUp
|
import torch
import torch.nn as nn
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width]
class TransitionUp(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convTrans = nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=3, stride=2, padding=0,
bias=True)
def forward(self, x, skip):
out = self.convTrans(x)
out = center_crop(out, skip.size(2), skip.size(3))
out = torch.cat([out, skip], 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 8
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 128
x4 = xindex % 16
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (20 + x0 + 9 * x1 + 81 * x2 + 324 * x3), tmp4 &
xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x4 + 16 * (-4 + x2) + 64 * x3), tmp10 &
xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, primals_2, primals_4, buf1,
512, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_4
return buf1, primals_1, primals_3
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width]
class TransitionUpNew(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convTrans = nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=3, stride=2, padding=0,
bias=True)
def forward(self, input_0, input_1):
primals_1 = self.convTrans.weight
primals_2 = self.convTrans.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Sreehari-S/Tiramisu_DigestPath
|
TransitionUp
| false
| 1,083
|
[
"Apache-2.0"
] | 0
|
a884ee911bc60ce997996e0ec2e6036600ffcffa
|
https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa
|
ConvBlock
|
import torch
import torch.nn as nn
import torch.utils.data
class WSConv2d(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
Note that input is multiplied rather than changing weights
this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.conv.bias)
def forward(self, x):
return self.conv(x * self.scale)
class PixelNorm(nn.Module):
def __init__(self):
super(PixelNorm, self).__init__()
self.epsilon = 1e-08
def forward(self, x):
return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) +
self.epsilon)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, use_pixelnorm=True):
super(ConvBlock, self).__init__()
self.use_pn = use_pixelnorm
self.conv1 = WSConv2d(in_channels, out_channels)
self.conv2 = WSConv2d(out_channels, out_channels)
self.leaky = nn.LeakyReLU(0.2)
self.pn = PixelNorm()
def forward(self, x):
x = self.leaky(self.conv1(x))
x = self.pn(x) if self.use_pn else x
x = self.leaky(self.conv2(x))
x = self.pn(x) if self.use_pn else x
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2(in_out_ptr0,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp7 = tmp6 > tmp1
tmp8 = tmp6 * tmp3
tmp9 = tl.where(tmp7, tmp6, tmp8)
tmp10 = tmp9 * tmp9
tmp12 = tmp11 > tmp1
tmp13 = tmp11 * tmp3
tmp14 = tl.where(tmp12, tmp11, tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp18 = tmp17 > tmp1
tmp19 = tmp17 * tmp3
tmp20 = tl.where(tmp18, tmp17, tmp19)
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp24 = tmp23 > tmp1
tmp25 = tmp23 * tmp3
tmp26 = tl.where(tmp24, tmp23, tmp25)
tmp27 = tmp26 * tmp26
tmp28 = tmp22 + tmp27
tmp29 = 4.0
tmp30 = tmp28 / tmp29
tmp31 = 1e-08
tmp32 = tmp30 + tmp31
tmp33 = libdevice.sqrt(tmp32)
tmp34 = tmp5 / tmp33
tmp35 = 0.23570226039551584
tmp36 = tmp34 * tmp35
tl.store(in_out_ptr0 + x3, tmp36, xmask)
@triton.jit
def triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp7 = tmp6 > tmp1
tmp8 = tmp6 * tmp3
tmp9 = tl.where(tmp7, tmp6, tmp8)
tmp10 = tmp9 * tmp9
tmp12 = tmp11 > tmp1
tmp13 = tmp11 * tmp3
tmp14 = tl.where(tmp12, tmp11, tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp18 = tmp17 > tmp1
tmp19 = tmp17 * tmp3
tmp20 = tl.where(tmp18, tmp17, tmp19)
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp24 = tmp23 > tmp1
tmp25 = tmp23 * tmp3
tmp26 = tl.where(tmp24, tmp23, tmp25)
tmp27 = tmp26 * tmp26
tmp28 = tmp22 + tmp27
tmp29 = 4.0
tmp30 = tmp28 / tmp29
tmp31 = 1e-08
tmp32 = tmp30 + tmp31
tmp33 = libdevice.sqrt(tmp32)
tmp34 = tmp5 / tmp33
tl.store(out_ptr0 + x3, tmp34, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3
del buf3
triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2[grid(256)](buf4
, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_1[grid(256)](buf6, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3[grid(256)](buf6,
buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf7, primals_2, primals_4, buf0, buf2, buf4, buf6
class WSConv2d(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
Note that input is multiplied rather than changing weights
this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.conv.bias)
def forward(self, x):
return self.conv(x * self.scale)
class PixelNorm(nn.Module):
def __init__(self):
super(PixelNorm, self).__init__()
self.epsilon = 1e-08
def forward(self, x):
return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) +
self.epsilon)
class ConvBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, use_pixelnorm=True):
super(ConvBlockNew, self).__init__()
self.use_pn = use_pixelnorm
self.conv1 = WSConv2d(in_channels, out_channels)
self.conv2 = WSConv2d(out_channels, out_channels)
self.leaky = nn.LeakyReLU(0.2)
self.pn = PixelNorm()
def forward(self, input_0):
primals_2 = self.conv1.conv.weight
primals_3 = self.conv1.conv.bias
primals_4 = self.conv2.conv.weight
primals_5 = self.conv2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
SongsLearning/Machine-Learning-Collection
|
ConvBlock
| false
| 1,084
|
[
"MIT"
] | 0
|
a8dff83969f67d37f70a89db06b851057d2da539
|
https://github.com/SongsLearning/Machine-Learning-Collection/tree/a8dff83969f67d37f70a89db06b851057d2da539
|
FcCat
|
import torch
import torch.nn as nn
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nIn': 4, 'nOut': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class FcCatNew(nn.Module):
def __init__(self, nIn, nOut):
super(FcCatNew, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Sreehari-S/Tiramisu_DigestPath
|
FcCat
| false
| 1,086
|
[
"Apache-2.0"
] | 0
|
a884ee911bc60ce997996e0ec2e6036600ffcffa
|
https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa
|
WSConv2d
|
import torch
import torch.nn as nn
import torch.utils.data
class WSConv2d(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
Note that input is multiplied rather than changing weights
this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.conv.bias)
def forward(self, x):
return self.conv(x * self.scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.23570226039551584
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class WSConv2dNew(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
Note that input is multiplied rather than changing weights
this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, gain=2):
super(WSConv2dNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding)
self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.conv.bias)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
SongsLearning/Machine-Learning-Collection
|
WSConv2d
| false
| 1,087
|
[
"MIT"
] | 0
|
a8dff83969f67d37f70a89db06b851057d2da539
|
https://github.com/SongsLearning/Machine-Learning-Collection/tree/a8dff83969f67d37f70a89db06b851057d2da539
|
Standardscaler
|
import torch
class Standardscaler(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_batch):
std, mean = torch.std_mean(input_batch.type(torch.float32),
unbiased=False)
total = (input_batch - mean) / std
return total
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_std_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = tmp0 - tmp8
tmp15 = 256.0
tmp16 = tmp13 / tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp14 / tmp17
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp18, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_std_mean_sub_0[grid(1)](arg0_1, buf3, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf3,
class StandardscalerNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Stuksus/StandardScaler_for_pytorch
|
Standardscaler
| false
| 1,088
|
[
"MIT"
] | 0
|
27da9afd111007f20a615bee9a5a7ac272adb241
|
https://github.com/Stuksus/StandardScaler_for_pytorch/tree/27da9afd111007f20a615bee9a5a7ac272adb241
|
FeatureResizer
|
import torch
import torch.utils.data
import torch
from torch import nn
class FeatureResizer(nn.Module):
"""
This class takes as input a set of embeddings of dimension C1 and outputs a set of
embedding of dimension C2, after a linear transformation, dropout and normalization (LN).
"""
def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
super().__init__()
self.do_ln = do_ln
self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
self.dropout = nn.Dropout(dropout)
def forward(self, encoder_features):
x = self.fc(encoder_features)
if self.do_ln:
x = self.layer_norm(x)
output = self.dropout(x)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_feat_size': 4, 'output_feat_size': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](buf0, buf1, buf2,
primals_4, primals_5, buf3, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class FeatureResizerNew(nn.Module):
"""
This class takes as input a set of embeddings of dimension C1 and outputs a set of
embedding of dimension C2, after a linear transformation, dropout and normalization (LN).
"""
def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
super().__init__()
self.do_ln = do_ln
self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = self.layer_norm.weight
primals_5 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Sudhir11292rt/DefVisTR
|
FeatureResizer
| false
| 1,089
|
[
"Apache-2.0"
] | 0
|
d52b2d88c10c6239de1c1ff851a743c58b708b75
|
https://github.com/Sudhir11292rt/DefVisTR/tree/d52b2d88c10c6239de1c1ff851a743c58b708b75
|
UGRNNLRCell
|
import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
return torch.relu(A, 0.0)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class UGRNNLRCell(RNNCell):
"""
UGRNN LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 3 matrices if not None else creates 2 matrices)
uRank = rank of U matrix
(creates 3 matrices if not None else creates 2 matrices)
UGRNN architecture and compression techniques are found in
UGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
Wi and Ui can further parameterised into low rank version by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='UGRNNLR'):
super(UGRNNLRCell, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'UGRNNLR'
def forward(self, input, state):
if self._wRank is None:
wComp1 = torch.matmul(input, self.W1)
wComp2 = torch.matmul(input, self.W2)
else:
wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1)
wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2)
if self._uRank is None:
uComp1 = torch.matmul(state, self.U1)
uComp2 = torch.matmul(state, self.U2)
else:
uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1)
uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2)
pre_comp1 = wComp1 + uComp1
pre_comp2 = wComp2 + uComp2
z = gen_nonlinearity(pre_comp1 + self.bias_gate, self.
_gate_nonlinearity)
c = gen_nonlinearity(pre_comp2 + self.bias_update, self.
_update_nonlinearity)
new_h = z * state + (1.0 - z) * c
return new_h
def getVars(self):
Vars = []
if self._num_W_matrices == 2:
Vars.extend([self.W1, self.W2])
else:
Vars.extend([self.W, self.W1, self.W2])
if self._num_U_matrices == 2:
Vars.extend([self.U1, self.U2])
else:
Vars.extend([self.U, self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
return Vars
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp5
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
primals_6, out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf4, buf5,
buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_7
del primals_8
return buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4,
64), (1, 4), 0)
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
return torch.relu(A, 0.0)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class UGRNNLRCellNew(RNNCell):
"""
UGRNN LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 3 matrices if not None else creates 2 matrices)
uRank = rank of U matrix
(creates 3 matrices if not None else creates 2 matrices)
UGRNN architecture and compression techniques are found in
UGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
Wi and Ui can further parameterised into low rank version by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='UGRNNLR'):
super(UGRNNLRCellNew, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'UGRNNLR'
def getVars(self):
Vars = []
if self._num_W_matrices == 2:
Vars.extend([self.W1, self.W2])
else:
Vars.extend([self.W, self.W1, self.W2])
if self._num_U_matrices == 2:
Vars.extend([self.U1, self.U2])
else:
Vars.extend([self.U, self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
return Vars
def forward(self, input_0, input_1):
primals_1 = self.W1
primals_3 = self.W2
primals_4 = self.U1
primals_6 = self.U2
primals_7 = self.bias_gate
primals_8 = self.bias_update
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
ShishirPatil/EdgeML-1
|
UGRNNLRCell
| false
| 1,090
|
[
"MIT"
] | 0
|
cbba9f8b989e545788427c004eb8450e7e4c1a21
|
https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21
|
FCLayer
|
import torch
import torch.nn as nn
class FCLayer(nn.Module):
def __init__(self, input_dim, output_dim, dropout_rate=0.0,
use_activation=True):
super(FCLayer, self).__init__()
self.use_activation = use_activation
self.dropout = nn.Dropout(dropout_rate)
self.linear = nn.Linear(input_dim, output_dim)
self.tanh = nn.Tanh()
def forward(self, x):
x = self.dropout(x)
if self.use_activation:
x = self.tanh(x)
return self.linear(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class FCLayerNew(nn.Module):
def __init__(self, input_dim, output_dim, dropout_rate=0.0,
use_activation=True):
super(FCLayerNew, self).__init__()
self.use_activation = use_activation
self.dropout = nn.Dropout(dropout_rate)
self.linear = nn.Linear(input_dim, output_dim)
self.tanh = nn.Tanh()
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
StevenChaoo/R-BERT-DDI
|
FCLayer
| false
| 1,091
|
[
"MIT"
] | 0
|
6d9666e0bc61397ca942ffad53653690c1e8a899
|
https://github.com/StevenChaoo/R-BERT-DDI/tree/6d9666e0bc61397ca942ffad53653690c1e8a899
|
MultiHeadAttention
|
import math
import torch
import numpy as np
def convert_pad_shape(pad_shape):
"""Reverse, then flatten a list of lists."""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
class BaseModule(torch.nn.Module):
def __init__(self):
super(BaseModule, self).__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.named_parameters():
if param.requires_grad:
num_params += np.prod(param.detach().cpu().numpy().shape)
return num_params
def relocate_input(self, x: 'list'):
"""
Relocates provided tensors to the same device set for the module.
"""
device = next(self.parameters()).device
for i in range(len(x)):
if isinstance(x[i], torch.Tensor) and x[i].device != device:
x[i] = x[i]
return x
class MultiHeadAttention(BaseModule):
def __init__(self, channels, out_channels, n_heads, window_size=None,
heads_share=True, p_dropout=0.0, proximal_bias=False, proximal_init
=False):
super(MultiHeadAttention, self).__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = torch.nn.Conv1d(channels, channels, 1)
self.conv_k = torch.nn.Conv1d(channels, channels, 1)
self.conv_v = torch.nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = torch.nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = torch.nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
self.drop = torch.nn.Dropout(p_dropout)
torch.nn.init.xavier_uniform_(self.conv_q.weight)
torch.nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
torch.nn.init.xavier_uniform_(self.conv_v.weight)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = *key.size(), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self
.k_channels)
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query,
key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(
rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
p_attn = torch.nn.functional.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = torch.nn.functional.pad(
relative_embeddings, convert_pad_shape([[0, 0], [pad_length,
pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
batch, heads, length, _ = x.size()
x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [
0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0],
[0, 0], [0, length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
batch, heads, length, _ = x.size()
x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [
0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0],
[0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf3, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf4 = buf1
del buf1
triton_poi_fused_convolution_0[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = buf2
del buf2
triton_poi_fused_convolution_0[grid(64)](buf8, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4,
4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf10, (4, 4, 4), (16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_0[grid(64)](buf11, primals_10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return (buf11, buf7, primals_1, primals_3, primals_4, primals_6,
primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16,
4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0),
reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
def convert_pad_shape(pad_shape):
"""Reverse, then flatten a list of lists."""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
class BaseModule(torch.nn.Module):
def __init__(self):
super(BaseModule, self).__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.named_parameters():
if param.requires_grad:
num_params += np.prod(param.detach().cpu().numpy().shape)
return num_params
def relocate_input(self, x: 'list'):
"""
Relocates provided tensors to the same device set for the module.
"""
device = next(self.parameters()).device
for i in range(len(x)):
if isinstance(x[i], torch.Tensor) and x[i].device != device:
x[i] = x[i]
return x
class MultiHeadAttentionNew(BaseModule):
def __init__(self, channels, out_channels, n_heads, window_size=None,
heads_share=True, p_dropout=0.0, proximal_bias=False, proximal_init
=False):
super(MultiHeadAttentionNew, self).__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = torch.nn.Conv1d(channels, channels, 1)
self.conv_k = torch.nn.Conv1d(channels, channels, 1)
self.conv_v = torch.nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = torch.nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = torch.nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
self.drop = torch.nn.Dropout(p_dropout)
torch.nn.init.xavier_uniform_(self.conv_q.weight)
torch.nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
torch.nn.init.xavier_uniform_(self.conv_v.weight)
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = *key.size(), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self
.k_channels)
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query,
key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(
rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
p_attn = torch.nn.functional.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = torch.nn.functional.pad(
relative_embeddings, convert_pad_shape([[0, 0], [pad_length,
pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
batch, heads, length, _ = x.size()
x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [
0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0],
[0, 0], [0, length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
batch, heads, length, _ = x.size()
x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [
0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0],
[0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
def forward(self, input_0, input_1):
primals_1 = self.conv_q.weight
primals_2 = self.conv_q.bias
primals_4 = self.conv_k.weight
primals_5 = self.conv_k.bias
primals_7 = self.conv_v.weight
primals_8 = self.conv_v.bias
primals_9 = self.conv_o.weight
primals_10 = self.conv_o.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
Sobsz/uberduck-ml-dev
|
MultiHeadAttention
| false
| 1,092
|
[
"Apache-2.0"
] | 0
|
f099238f6f2e3f600d72d89dea3c883c59d91387
|
https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387
|
WNConv2d
|
import torch
from torch import nn
import torch.utils.data
class WNConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
kernel_size, stride=stride, padding=padding, bias=bias))
self.out_channel = out_channel
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
self.kernel_size = kernel_size
self.activation = activation
def forward(self, input):
out = self.conv(input)
if self.activation is not None:
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2,
primals_1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(16)](buf4, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf4, buf2, primals_1, primals_2, primals_4, buf1, buf2
class WNConv2dNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
kernel_size, stride=stride, padding=padding, bias=bias))
self.out_channel = out_channel
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
self.kernel_size = kernel_size
self.activation = activation
def forward(self, input_0):
primals_3 = self.conv.bias
primals_1 = self.conv.weight_g
primals_2 = self.conv.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Shivanshu-Gupta/KaoKore-VQ-VAE2
|
WNConv2d
| false
| 1,093
|
[
"MIT"
] | 0
|
38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
|
https://github.com/Shivanshu-Gupta/KaoKore-VQ-VAE2/tree/38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
|
DisparityRegression
|
import torch
import torch.nn as nn
import torch.utils.data
class DisparityRegression(nn.Module):
def __init__(self, maxdisp, win_size):
super(DisparityRegression, self).__init__()
self.max_disp = maxdisp
self.win_size = win_size
def forward(self, x):
disp = torch.arange(0, self.max_disp).view(1, -1, 1, 1).float()
if self.win_size > 0:
max_d = torch.argmax(x, dim=1, keepdim=True)
d_value = []
prob_value = []
for d in range(-self.win_size, self.win_size + 1):
index = max_d + d
index[index < 0] = 0
index[index > x.shape[1] - 1] = x.shape[1] - 1
d_value.append(index)
prob = torch.gather(x, dim=1, index=index)
prob_value.append(prob)
part_x = torch.cat(prob_value, dim=1)
part_x = part_x / (torch.sum(part_x, dim=1, keepdim=True) + 1e-08)
part_d = torch.cat(d_value, dim=1).float()
out = torch.sum(part_x * part_d, dim=1)
else:
out = torch.sum(x * disp, 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'maxdisp': 4, 'win_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0(in_ptr0,
out_ptr2, out_ptr4, out_ptr6, out_ptr8, out_ptr10, out_ptr12, out_ptr14,
out_ptr16, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22,
out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1], -4, tl.int64)
tmp48 = tmp46 + tmp47
tmp49 = tmp48 < tmp10
tmp50 = tl.where(tmp49, tmp10, tmp48)
tmp51 = tmp50 > tmp41
tmp52 = tl.where(tmp51, tmp41, tmp50)
tmp53 = tl.full([1], -3, tl.int64)
tmp54 = tmp46 + tmp53
tmp55 = tmp54 < tmp10
tmp56 = tl.where(tmp55, tmp10, tmp54)
tmp57 = tmp56 > tmp41
tmp58 = tl.where(tmp57, tmp41, tmp56)
tmp59 = tl.full([1], -2, tl.int64)
tmp60 = tmp46 + tmp59
tmp61 = tmp60 < tmp10
tmp62 = tl.where(tmp61, tmp10, tmp60)
tmp63 = tmp62 > tmp41
tmp64 = tl.where(tmp63, tmp41, tmp62)
tmp65 = tl.full([1], -1, tl.int64)
tmp66 = tmp46 + tmp65
tmp67 = tmp66 < tmp10
tmp68 = tl.where(tmp67, tmp10, tmp66)
tmp69 = tmp68 > tmp41
tmp70 = tl.where(tmp69, tmp41, tmp68)
tmp71 = tmp46 + tmp10
tmp72 = tmp71 < tmp10
tmp73 = tl.where(tmp72, tmp10, tmp71)
tmp74 = tmp73 > tmp41
tmp75 = tl.where(tmp74, tmp41, tmp73)
tmp76 = tmp46 + tmp11
tmp77 = tmp76 < tmp10
tmp78 = tl.where(tmp77, tmp10, tmp76)
tmp79 = tmp78 > tmp41
tmp80 = tl.where(tmp79, tmp41, tmp78)
tmp81 = tmp46 + tmp26
tmp82 = tmp81 < tmp10
tmp83 = tl.where(tmp82, tmp10, tmp81)
tmp84 = tmp83 > tmp41
tmp85 = tl.where(tmp84, tmp41, tmp83)
tmp86 = tmp46 + tmp41
tmp87 = tmp86 < tmp10
tmp88 = tl.where(tmp87, tmp10, tmp86)
tmp89 = tmp88 > tmp41
tmp90 = tl.where(tmp89, tmp41, tmp88)
tmp91 = tl.full([1], 4, tl.int64)
tmp92 = tmp46 + tmp91
tmp93 = tmp92 < tmp10
tmp94 = tl.where(tmp93, tmp10, tmp92)
tmp95 = tmp94 > tmp41
tmp96 = tl.where(tmp95, tmp41, tmp94)
tmp97 = tl.full([XBLOCK], 4, tl.int32)
tmp98 = tmp52 + tmp97
tmp99 = tmp52 < 0
tmp100 = tl.where(tmp99, tmp98, tmp52)
tl.device_assert((0 <= tmp100) & (tmp100 < 4) | ~xmask,
'index out of bounds: 0 <= tmp100 < 4')
tmp102 = tl.load(in_ptr0 + (x0 + 16 * tmp100 + 64 * x1), xmask)
tmp103 = tmp58 + tmp97
tmp104 = tmp58 < 0
tmp105 = tl.where(tmp104, tmp103, tmp58)
tl.device_assert((0 <= tmp105) & (tmp105 < 4) | ~xmask,
'index out of bounds: 0 <= tmp105 < 4')
tmp107 = tl.load(in_ptr0 + (x0 + 16 * tmp105 + 64 * x1), xmask)
tmp108 = tmp64 + tmp97
tmp109 = tmp64 < 0
tmp110 = tl.where(tmp109, tmp108, tmp64)
tl.device_assert((0 <= tmp110) & (tmp110 < 4) | ~xmask,
'index out of bounds: 0 <= tmp110 < 4')
tmp112 = tl.load(in_ptr0 + (x0 + 16 * tmp110 + 64 * x1), xmask)
tmp113 = tmp70 + tmp97
tmp114 = tmp70 < 0
tmp115 = tl.where(tmp114, tmp113, tmp70)
tl.device_assert((0 <= tmp115) & (tmp115 < 4) | ~xmask,
'index out of bounds: 0 <= tmp115 < 4')
tmp117 = tl.load(in_ptr0 + (x0 + 16 * tmp115 + 64 * x1), xmask)
tmp118 = tmp75 + tmp97
tmp119 = tmp75 < 0
tmp120 = tl.where(tmp119, tmp118, tmp75)
tl.device_assert((0 <= tmp120) & (tmp120 < 4) | ~xmask,
'index out of bounds: 0 <= tmp120 < 4')
tmp122 = tl.load(in_ptr0 + (x0 + 16 * tmp120 + 64 * x1), xmask)
tmp123 = tmp80 + tmp97
tmp124 = tmp80 < 0
tmp125 = tl.where(tmp124, tmp123, tmp80)
tl.device_assert((0 <= tmp125) & (tmp125 < 4) | ~xmask,
'index out of bounds: 0 <= tmp125 < 4')
tmp127 = tl.load(in_ptr0 + (x0 + 16 * tmp125 + 64 * x1), xmask)
tmp128 = tmp85 + tmp97
tmp129 = tmp85 < 0
tmp130 = tl.where(tmp129, tmp128, tmp85)
tl.device_assert((0 <= tmp130) & (tmp130 < 4) | ~xmask,
'index out of bounds: 0 <= tmp130 < 4')
tmp132 = tl.load(in_ptr0 + (x0 + 16 * tmp130 + 64 * x1), xmask)
tmp133 = tmp90 + tmp97
tmp134 = tmp90 < 0
tmp135 = tl.where(tmp134, tmp133, tmp90)
tl.device_assert((0 <= tmp135) & (tmp135 < 4) | ~xmask,
'index out of bounds: 0 <= tmp135 < 4')
tmp137 = tl.load(in_ptr0 + (x0 + 16 * tmp135 + 64 * x1), xmask)
tmp138 = tmp96 + tmp97
tmp139 = tmp96 < 0
tmp140 = tl.where(tmp139, tmp138, tmp96)
tl.device_assert((0 <= tmp140) & (tmp140 < 4) | ~xmask,
'index out of bounds: 0 <= tmp140 < 4')
tmp142 = tl.load(in_ptr0 + (x0 + 16 * tmp140 + 64 * x1), xmask)
tl.store(out_ptr2 + (x0 + 144 * x1), tmp52, xmask)
tl.store(out_ptr4 + (x0 + 144 * x1), tmp58, xmask)
tl.store(out_ptr6 + (x0 + 144 * x1), tmp64, xmask)
tl.store(out_ptr8 + (x0 + 144 * x1), tmp70, xmask)
tl.store(out_ptr10 + (x0 + 144 * x1), tmp75, xmask)
tl.store(out_ptr12 + (x0 + 144 * x1), tmp80, xmask)
tl.store(out_ptr14 + (x0 + 144 * x1), tmp85, xmask)
tl.store(out_ptr16 + (x0 + 144 * x1), tmp90, xmask)
tl.store(out_ptr18 + (x0 + 144 * x1), tmp96, xmask)
tl.store(out_ptr19 + 9 * x2, tmp102, xmask)
tl.store(out_ptr20 + 9 * x2, tmp107, xmask)
tl.store(out_ptr21 + 9 * x2, tmp112, xmask)
tl.store(out_ptr22 + 9 * x2, tmp117, xmask)
tl.store(out_ptr23 + 9 * x2, tmp122, xmask)
tl.store(out_ptr24 + 9 * x2, tmp127, xmask)
tl.store(out_ptr25 + 9 * x2, tmp132, xmask)
tl.store(out_ptr26 + 9 * x2, tmp137, xmask)
tl.store(out_ptr27 + 9 * x2, tmp142, xmask)
@triton.jit
def triton_per_fused__to_copy_add_div_mul_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
x2 = xindex % 16
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + (x2 + 16 * r1 + 144 * x3), rmask & xmask,
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-08
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(rmask & xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(in_out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf30 = empty_strided_cuda((4, 9, 4, 4), (144, 16, 4, 1), torch.int64)
buf2 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 0)
buf4 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 16)
buf6 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 32)
buf8 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 48)
buf10 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 64)
buf12 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 80)
buf14 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 96)
buf16 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 112)
buf18 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 128)
buf28 = empty_strided_cuda((4, 9, 4, 4), (144, 1, 36, 9), torch.float32
)
buf19 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 0)
buf20 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 1)
buf21 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 2)
buf22 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 3)
buf23 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 4)
buf24 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 5)
buf25 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 6)
buf26 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 7)
buf27 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 8)
get_raw_stream(0)
triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0[grid(64)](
arg0_1, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16,
buf18, buf19, buf20, buf21, buf22, buf23, buf24, buf25, buf26,
buf27, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
buf29 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf31 = reinterpret_tensor(buf29, (4, 4, 4), (16, 4, 1), 0)
del buf29
triton_per_fused__to_copy_add_div_mul_sum_1[grid(64)](buf31, buf28,
buf30, 64, 9, XBLOCK=8, num_warps=2, num_stages=1)
del buf10
del buf12
del buf14
del buf16
del buf18
del buf19
del buf2
del buf20
del buf21
del buf22
del buf23
del buf24
del buf25
del buf26
del buf27
del buf28
del buf30
del buf4
del buf6
del buf8
return buf31,
class DisparityRegressionNew(nn.Module):
def __init__(self, maxdisp, win_size):
super(DisparityRegressionNew, self).__init__()
self.max_disp = maxdisp
self.win_size = win_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
SpadeLiu/Graft-PSMNet
|
DisparityRegression
| false
| 1,094
|
[
"MIT"
] | 0
|
1f2950d5afd85237f8d3604caab20dd47a8c9889
|
https://github.com/SpadeLiu/Graft-PSMNet/tree/1f2950d5afd85237f8d3604caab20dd47a8c9889
|
Message_Passing_Unit_v1
|
import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Message_Passing_Unit_v1(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v1, self).__init__()
self.w = nn.Linear(fea_size * 2, filter_size, bias=True)
self.fea_size = fea_size
self.filter_size = filter_size
def forward(self, unary_term, pair_term):
if unary_term.size()[0] == 1 and pair_term.size()[0] > 1:
unary_term = unary_term.expand(pair_term.size()[0], unary_term.
size()[1])
if unary_term.size()[0] > 1 and pair_term.size()[0] == 1:
pair_term = pair_term.expand(unary_term.size()[0], pair_term.
size()[1])
gate = torch.cat([unary_term, pair_term], 1)
gate = F.relu(gate)
gate = torch.sigmoid(self.w(gate)).mean(1)
output = pair_term * gate.view(-1, 1).expand(gate.size()[0],
pair_term.size()[1])
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'fea_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_per_fused_mean_sigmoid_1(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = 128.0
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (128, 8), (8, 1))
assert_size_stride(primals_4, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_relu_0[grid(32)](primals_1, primals_2, buf0,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 128), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mean_sigmoid_1[grid(4)](buf1, buf2, 4, 128, XBLOCK
=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_2[grid(16)](primals_2, buf2, buf3, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf2
return buf3, primals_2, buf0, buf1
class Message_Passing_Unit_v1New(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v1New, self).__init__()
self.w = nn.Linear(fea_size * 2, filter_size, bias=True)
self.fea_size = fea_size
self.filter_size = filter_size
def forward(self, input_0, input_1):
primals_3 = self.w.weight
primals_4 = self.w.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
SpartaG117/scene_graph_benchmark
|
Message_Passing_Unit_v1
| false
| 1,095
|
[
"MIT"
] | 0
|
e2e49940dd2f752b1faf9ae26707435ba3441bcb
|
https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb
|
ExpModule
|
import torch
import torch.nn as nn
class ExpModule(nn.Module):
def __init__(self):
super(ExpModule, self).__init__()
def forward(self, x):
return torch.exp(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ExpModuleNew(nn.Module):
def __init__(self):
super(ExpModuleNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
SimonTreu/sdvae
|
ExpModule
| false
| 1,096
|
[
"MIT"
] | 0
|
e0270b9b2acf2d66eec93870f1c5633c8f04d9ab
|
https://github.com/SimonTreu/sdvae/tree/e0270b9b2acf2d66eec93870f1c5633c8f04d9ab
|
EncoderLayer
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, key_sequence_length, size_per_head = key.size()
query = query.view(batch_size, num_head, query_sequence_length,
size_per_head)
key = key.view(batch_size, num_head, size_per_head, key_sequence_length
)
attention_score = torch.einsum('abcd, abde -> abce', query, key)
attention_score = attention_score / math.sqrt(size_per_head)
if mask is not None:
attention_score = attention_score.masked_fill(mask == 0, -
1000000000.0)
attention_score = F.softmax(attention_score, dim=-1)
result = attention_score @ value
return result, attention_score
class MultiHeadAttention(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, num_head):
super(MultiHeadAttention, self).__init__()
self.model_dim = model_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.num_head = num_head
self.Wq = nn.Linear(model_dim, key_dim)
self.Wk = nn.Linear(model_dim, key_dim)
self.Wv = nn.Linear(model_dim, value_dim)
self.attention = ScaledDotProductAttention()
self.Wo = nn.Linear(value_dim, model_dim)
def forward(self, query, key, value, mask=None):
prj_query = self.Wq(query)
prj_key = self.Wk(key)
prj_value = self.Wv(value)
multihead_query = self.multihead_split(prj_query)
multihead_key = self.multihead_split(prj_key)
multihead_value = self.multihead_split(prj_value)
attention_output, _attention_score = self.attention(multihead_query,
multihead_key, multihead_value, mask=mask)
output = self.multihead_concat(attention_output)
output = self.Wo(output)
return output
def multihead_split(self, tensor):
batch_size, sequence_length, hidden_size = tensor.size()
size_per_head = hidden_size // self.num_head
return tensor.view(batch_size, self.num_head, sequence_length,
size_per_head)
def multihead_concat(self, tensor):
batch_size, num_head, sequence_length, size_per_head = tensor.size()
hidden_size = num_head * size_per_head
return tensor.view(batch_size, sequence_length, hidden_size)
class FeedForward(nn.Module):
def __init__(self, model_dim, hidden_dim, drop_prob):
super(FeedForward, self).__init__()
self.model_dim = model_dim
self.hidden_dim = hidden_dim
self.drop_prob = drop_prob
self.linearlayer1 = nn.Linear(model_dim, hidden_dim)
self.linearlayer2 = nn.Linear(hidden_dim, model_dim)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(drop_prob)
def forward(self, tensor):
tensor = self.dropout(self.relu(self.linearlayer1(tensor)))
return self.linearlayer2(tensor)
class EncoderLayer(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head,
drop_prob):
super(EncoderLayer, self).__init__()
self.attention = MultiHeadAttention(model_dim, key_dim, value_dim,
num_head)
self.normalization1 = nn.LayerNorm(model_dim)
self.dropout1 = nn.Dropout(drop_prob)
self.ffn = FeedForward(model_dim, hidden_dim, drop_prob)
self.normalization2 = nn.LayerNorm(model_dim)
self.dropout2 = nn.Dropout(drop_prob)
def forward(self, tensor, source_mask):
residual = tensor
tensor = self.attention(query=tensor, key=tensor, value=tensor,
mask=source_mask)
tensor = self.dropout1(self.normalization1(tensor + residual))
residual = tensor
tensor = self.ffn(tensor)
tensor = self.dropout2(self.normalization2(tensor + residual))
return tensor
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4, 'key_dim': 4, 'value_dim': 4, 'hidden_dim':
4, 'num_head': 4, 'drop_prob': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp15 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp21 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = -1000000000.0
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp10 = tmp1 * tmp9
tmp11 = tmp10 * tmp4
tmp12 = tl.where(tmp8, tmp6, tmp11)
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp16 = tmp1 * tmp15
tmp17 = tmp16 * tmp4
tmp18 = tl.where(tmp14, tmp6, tmp17)
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp22 = tmp1 * tmp21
tmp23 = tmp22 * tmp4
tmp24 = tl.where(tmp20, tmp6, tmp23)
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tmp26 = tmp7 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp12 - tmp25
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp18 - tmp25
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp24 - tmp25
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tl.store(out_ptr0 + x2, tmp25, xmask)
tl.store(out_ptr1 + x2, tmp36, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = -1000000000.0
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_eq_0[grid(256)](primals_8, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_8
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf3, buf0,
buf1, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf3, buf0,
buf1, buf4, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf7)
buf8 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_10
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf8, primals_1,
buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(64)](buf8, primals_1,
buf9, buf10, primals_11, primals_12, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
del buf12
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf13,
primals_14, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf14)
buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0)
del buf14
triton_poi_fused_add_6[grid(64)](buf15, primals_16, buf11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf16 = buf9
del buf9
buf17 = buf10
del buf10
triton_poi_fused_native_layer_norm_7[grid(16)](buf15, buf16, buf17,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_8[grid(64)](buf15, buf16, buf17,
primals_17, primals_18, buf18, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf16
del buf17
del primals_18
return (buf18, primals_1, primals_11, primals_17, buf0, buf1, buf3,
buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8,
reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(
buf13, (16, 4), (4, 1), 0), buf15, primals_15, buf19, primals_13,
primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0))
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, key_sequence_length, size_per_head = key.size()
query = query.view(batch_size, num_head, query_sequence_length,
size_per_head)
key = key.view(batch_size, num_head, size_per_head, key_sequence_length
)
attention_score = torch.einsum('abcd, abde -> abce', query, key)
attention_score = attention_score / math.sqrt(size_per_head)
if mask is not None:
attention_score = attention_score.masked_fill(mask == 0, -
1000000000.0)
attention_score = F.softmax(attention_score, dim=-1)
result = attention_score @ value
return result, attention_score
class MultiHeadAttention(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, num_head):
super(MultiHeadAttention, self).__init__()
self.model_dim = model_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.num_head = num_head
self.Wq = nn.Linear(model_dim, key_dim)
self.Wk = nn.Linear(model_dim, key_dim)
self.Wv = nn.Linear(model_dim, value_dim)
self.attention = ScaledDotProductAttention()
self.Wo = nn.Linear(value_dim, model_dim)
def forward(self, query, key, value, mask=None):
prj_query = self.Wq(query)
prj_key = self.Wk(key)
prj_value = self.Wv(value)
multihead_query = self.multihead_split(prj_query)
multihead_key = self.multihead_split(prj_key)
multihead_value = self.multihead_split(prj_value)
attention_output, _attention_score = self.attention(multihead_query,
multihead_key, multihead_value, mask=mask)
output = self.multihead_concat(attention_output)
output = self.Wo(output)
return output
def multihead_split(self, tensor):
batch_size, sequence_length, hidden_size = tensor.size()
size_per_head = hidden_size // self.num_head
return tensor.view(batch_size, self.num_head, sequence_length,
size_per_head)
def multihead_concat(self, tensor):
batch_size, num_head, sequence_length, size_per_head = tensor.size()
hidden_size = num_head * size_per_head
return tensor.view(batch_size, sequence_length, hidden_size)
class FeedForward(nn.Module):
def __init__(self, model_dim, hidden_dim, drop_prob):
super(FeedForward, self).__init__()
self.model_dim = model_dim
self.hidden_dim = hidden_dim
self.drop_prob = drop_prob
self.linearlayer1 = nn.Linear(model_dim, hidden_dim)
self.linearlayer2 = nn.Linear(hidden_dim, model_dim)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(drop_prob)
def forward(self, tensor):
tensor = self.dropout(self.relu(self.linearlayer1(tensor)))
return self.linearlayer2(tensor)
class EncoderLayerNew(nn.Module):
def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head,
drop_prob):
super(EncoderLayerNew, self).__init__()
self.attention = MultiHeadAttention(model_dim, key_dim, value_dim,
num_head)
self.normalization1 = nn.LayerNorm(model_dim)
self.dropout1 = nn.Dropout(drop_prob)
self.ffn = FeedForward(model_dim, hidden_dim, drop_prob)
self.normalization2 = nn.LayerNorm(model_dim)
self.dropout2 = nn.Dropout(drop_prob)
def forward(self, input_0, input_1):
primals_2 = self.attention.Wq.weight
primals_3 = self.attention.Wq.bias
primals_4 = self.attention.Wk.weight
primals_5 = self.attention.Wk.bias
primals_6 = self.attention.Wv.weight
primals_7 = self.attention.Wv.bias
primals_9 = self.attention.Wo.weight
primals_10 = self.attention.Wo.bias
primals_11 = self.normalization1.weight
primals_12 = self.normalization1.bias
primals_13 = self.ffn.linearlayer1.weight
primals_14 = self.ffn.linearlayer1.bias
primals_15 = self.ffn.linearlayer2.weight
primals_16 = self.ffn.linearlayer2.bias
primals_17 = self.normalization2.weight
primals_18 = self.normalization2.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
|
SeungoneKim/Transformer_implementation
|
EncoderLayer
| false
| 1,097
|
[
"Apache-2.0"
] | 0
|
a52bf552eb645fc9bfb812cc26842fc147d6c008
|
https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008
|
Residual_Covolution
|
import torch
import torch.nn as nn
class Residual_Covolution(nn.Module):
def __init__(self, icol, ocol, num_classes):
super(Residual_Covolution, self).__init__()
self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding
=12, dilation=12, bias=True)
self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1,
padding=12, dilation=12, bias=True)
self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True)
self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding
=0, dilation=1, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
dow1 = self.conv1(x)
dow1 = self.relu(dow1)
seg = self.conv2(dow1)
inc1 = self.conv3(seg)
add1 = dow1 + self.relu(inc1)
inc2 = self.conv4(add1)
out = x + self.relu(inc2)
return out, seg
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'icol': 4, 'ocol': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 + tmp5
tmp7 = 0.0
tmp8 = tmp5 <= tmp7
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(12, 12), dilation=(12, 12), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(12, 12), dilation=(12, 12), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_2[grid(256)](
buf1, buf4, primals_7, buf5, buf9, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf4
del buf4
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_2[grid(256)](
primals_3, buf6, primals_9, buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
del primals_9
return (buf7, buf3, primals_1, primals_3, primals_4, primals_6,
primals_8, buf1, buf3, buf5, buf8, buf9)
class Residual_CovolutionNew(nn.Module):
def __init__(self, icol, ocol, num_classes):
super(Residual_CovolutionNew, self).__init__()
self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding
=12, dilation=12, bias=True)
self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1,
padding=12, dilation=12, bias=True)
self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True)
self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding
=0, dilation=1, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
SultanAbuGhazal/CGNet
|
Residual_Covolution
| false
| 1,098
|
[
"MIT"
] | 0
|
f10b976b984ba09be26b902ed4da97cd1311cf17
|
https://github.com/SultanAbuGhazal/CGNet/tree/f10b976b984ba09be26b902ed4da97cd1311cf17
|
ResidualBlockNoBN
|
import torch
from torch import nn
class ResidualBlockNoBN(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResidualBlockNoBN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=stride, padding=1,
bias=True)
self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=1, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(nn.Conv2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=(1, 1), stride=
stride, bias=False))
def forward(self, x):
out = nn.ReLU()(self.conv1(x))
out = self.conv2(out)
out += self.shortcut(x)
out = nn.ReLU()(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf3, primals_5, primals_3, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4
class ResidualBlockNoBNNew(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResidualBlockNoBNNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=stride, padding=1,
bias=True)
self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=1, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(nn.Conv2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=(1, 1), stride=
stride, bias=False))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Suvapna/ArtificialLaughter
|
ResidualBlockNoBN
| false
| 1,100
|
[
"MIT"
] | 0
|
a7114134b698f829e05e74cac30052e18b260f85
|
https://github.com/Suvapna/ArtificialLaughter/tree/a7114134b698f829e05e74cac30052e18b260f85
|
SpatialAttention
|
import torch
import torch.nn as nn
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7, bias=True):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=bias)
self.sigmoid = nn.Sigmoid()
def init_weighs(self):
normal_init(self.spatial_layer.conv1, std=0.01)
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
def __init__(self, kernel_size=7, bias=True):
super(SpatialAttentionNew, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=bias)
self.sigmoid = nn.Sigmoid()
def init_weighs(self):
normal_init(self.spatial_layer.conv1, std=0.01)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
SuzaKrish/mmdetection
|
SpatialAttention
| false
| 1,101
|
[
"Apache-2.0"
] | 0
|
31c16891d7493252262e738bcbf05326dba866b2
|
https://github.com/SuzaKrish/mmdetection/tree/31c16891d7493252262e738bcbf05326dba866b2
|
Message_Passing_Unit_v2
|
import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Message_Passing_Unit_v2(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v2, self).__init__()
self.w = nn.Linear(fea_size, filter_size, bias=True)
self.fea_size = fea_size
self.filter_size = filter_size
def forward(self, unary_term, pair_term):
if unary_term.size()[0] == 1 and pair_term.size()[0] > 1:
unary_term = unary_term.expand(pair_term.size()[0], unary_term.
size()[1])
if unary_term.size()[0] > 1 and pair_term.size()[0] == 1:
pair_term = pair_term.expand(unary_term.size()[0], pair_term.
size()[1])
gate = self.w(F.relu(unary_term)) * self.w(F.relu(pair_term))
gate = torch.sigmoid(gate.sum(1))
output = pair_term * gate.expand(gate.size()[0], pair_term.size()[1])
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'fea_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 128 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.sigmoid(tmp6)
tmp8 = 1.0
tmp9 = tmp8 - tmp7
tmp10 = tmp7 * tmp9
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (128, 4), (4, 1))
assert_size_stride(primals_4, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(4, 128), (1, 4), 0), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_relu_0[grid(16)](primals_2, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3,
(4, 128), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_3
del primals_4
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1[grid(4)](buf1,
buf3, buf4, buf6, 4, 128, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_2[grid(16)](primals_2, buf4, buf5, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf4
return buf5, primals_2, buf0, buf1, buf2, buf3, buf6
class Message_Passing_Unit_v2New(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v2New, self).__init__()
self.w = nn.Linear(fea_size, filter_size, bias=True)
self.fea_size = fea_size
self.filter_size = filter_size
def forward(self, input_0, input_1):
primals_3 = self.w.weight
primals_4 = self.w.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
SpartaG117/scene_graph_benchmark
|
Message_Passing_Unit_v2
| false
| 1,102
|
[
"MIT"
] | 0
|
e2e49940dd2f752b1faf9ae26707435ba3441bcb
|
https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb
|
PositionalEmbedding
|
import math
import torch
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError('The rank of input must be 3.')
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
positions = torch.arange(length, dtype=inputs.dtype, device=inputs.
device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=
inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
signal = torch.cat([signal, pad], axis=1)
return inputs + torch.reshape(signal, [1, -1, channels])
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp0.to(tl.float32)
tmp6 = -9.210340371976184
tmp7 = tmp5 * tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = x1
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp10 * tmp8
tmp12 = tl_math.sin(tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp18 = -2 + x0
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp19 * tmp6
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp10 * tmp21
tmp23 = tl_math.cos(tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_1[grid(64)](arg0_1, buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
del buf0
return buf1,
class PositionalEmbeddingNew(torch.nn.Module):
def __init__(self):
super(PositionalEmbeddingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
THUNLP-MT/PLM4MT
|
PositionalEmbedding
| false
| 1,103
|
[
"BSD-3-Clause"
] | 0
|
85bd2ee9d96b07ac827e14d4b3e5b0d0924c3401
|
https://github.com/THUNLP-MT/PLM4MT/tree/85bd2ee9d96b07ac827e14d4b3e5b0d0924c3401
|
MaxPool
|
import torch
import torch.nn as nn
class MaxPool(nn.Module):
def __init__(self, dim=1):
super(MaxPool, self).__init__()
self.dim = dim
def forward(self, input):
return torch.max(input, self.dim)[0]
def __repr__(self):
return self.__class__.__name__ + ' (' + 'dim=' + str(self.dim) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MaxPoolNew(nn.Module):
def __init__(self, dim=1):
super(MaxPoolNew, self).__init__()
self.dim = dim
def __repr__(self):
return self.__class__.__name__ + ' (' + 'dim=' + str(self.dim) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
SwaggyZhang/Geometry-aware
|
MaxPool
| false
| 1,104
|
[
"Apache-2.0"
] | 0
|
a750c00aa2f0bda5160dfdeee2eef5230fd9d993
|
https://github.com/SwaggyZhang/Geometry-aware/tree/a750c00aa2f0bda5160dfdeee2eef5230fd9d993
|
Transpose
|
import torch
import torch.nn as nn
class Transpose(nn.Module):
def __init__(self, dim1=0, dim2=1):
super(Transpose, self).__init__()
self.dim1 = dim1
self.dim2 = dim2
def forward(self, input):
return input.transpose(self.dim1, self.dim2).contiguous()
def __repr__(self):
return self.__class__.__name__ + ' (' + 'between=' + str(self.dim1
) + ',' + str(self.dim2) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TransposeNew(nn.Module):
def __init__(self, dim1=0, dim2=1):
super(TransposeNew, self).__init__()
self.dim1 = dim1
self.dim2 = dim2
def __repr__(self):
return self.__class__.__name__ + ' (' + 'between=' + str(self.dim1
) + ',' + str(self.dim2) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
SwaggyZhang/Geometry-aware
|
Transpose
| false
| 1,105
|
[
"Apache-2.0"
] | 0
|
a750c00aa2f0bda5160dfdeee2eef5230fd9d993
|
https://github.com/SwaggyZhang/Geometry-aware/tree/a750c00aa2f0bda5160dfdeee2eef5230fd9d993
|
GraphConv
|
import torch
from torch import nn
import torch.nn
import torch.autograd
def sparse_bmm(sparse_matrix, dense_matrix_batch):
"""
Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix.
Args:
sparse_matrix (torch.sparse.FloatTensor): Shape = (m, n)
dense_matrix_batch (torch.FloatTensor): Shape = (b, n, p)
Returns:
(torch.FloatTensor):
Result of the batched matrix multiplication. Shape = (b, n, p)
"""
m = sparse_matrix.shape[0]
b, n, p = dense_matrix_batch.shape
dense_matrix = dense_matrix_batch.transpose(0, 1).reshape(n, b * p)
result = torch.sparse.mm(sparse_matrix, dense_matrix)
return result.reshape(m, b, p).transpose(0, 1)
class GraphConv(nn.Module):
"""
A simple graph convolution layer, similar to the one defined in Kipf et al.
https://arxiv.org/abs/1609.02907
This operation with self_layer=False is equivalent to :math:`(A H W)` where:
- :math:`H` is the node features with shape (batch_size, num_nodes, input_dim)
- :math:`W` is a weight matrix of shape (input_dim, output_dim)
- :math:`A` is the adjacency matrix of shape (num_nodes, num_nodes).
It can include self-loop.
With normalize_adj=True, it is equivalent to :math:`(D^{-1} A H W)`, where:
- :math:`D` is a diagonal matrix with :math:`D_{ii}` = the sum of the i-th row of A.
In other words, :math:`D` is the incoming degree of each node.
With self_layer=True, it is equivalent to the above plus :math:`(H W_{\\text{self}})`, where:
- :math:`W_{\\text{self}}` is a separate weight matrix to filter each node's self features.
Note that when self_layer is True, A should not include self-loop.
Example:
>>> node_feat = torch.rand(1, 3, 5)
>>> i = torch.LongTensor(
... [[0, 1, 1, 2, 2, 0], [1, 0, 2, 1, 0, 2]])
>>> v = torch.FloatTensor([1, 1, 1, 1, 1, 1])
>>> adj = torch.sparse.FloatTensor(i, v, torch.Size([3, 3]))
>>> model = GraphConv(5, 10)
>>> output = model(node_feat, adj)
>>> # pre-normalize adj
>>> adj = normalize_adj(adj)
>>> output = model(node_feat, adj, normalize_adj=False)
If you use this code, please cite the original paper in addition to Kaolin.
.. code-block::
@article{kipf2016semi,
title={Semi-Supervised Classification with Graph Convolutional Networks},
author={Kipf, Thomas N and Welling, Max},
journal={arXiv preprint arXiv:1609.02907},
year={2016}
}
Args:
input_dim (int): The number of features in each input node.
output_dim (int): The number of features in each output node.
bias (bool): Whether to add bias after the node-wise linear layer.
"""
def __init__(self, input_dim, output_dim, self_layer=True, bias=True):
super(GraphConv, self).__init__()
self.self_layer = self_layer
self.linear = nn.Linear(input_dim, output_dim, bias=bias)
if self_layer:
self.linear_self = nn.Linear(input_dim, output_dim, bias=bias)
else:
self.linear_self = None
self.initialize()
def initialize(self):
nn.init.xavier_uniform_(self.linear.weight.data)
if self.linear.bias is not None:
self.linear.bias.data.uniform_(-1.0, 1.0)
if self.self_layer:
nn.init.xavier_uniform_(self.linear_self.weight.data)
if self.linear_self.bias is not None:
self.linear_self.bias.data.uniform_(-1.0, 1.0)
def forward(self, node_feat, adj, normalize_adj=True):
"""
Args:
node_feat (torch.FloatTensor):
Shape = (batch_size, num_nodes, input_dim)
The input features of each node.
adj (torch.sparse.FloatTensor or torch.FloatTensor):
Shape = (num_nodes, num_nodes)
The adjacency matrix. adj[i, j] is non-zero if there's an
incoming edge from j to i. Should not include self-loop if
self_layer is True.
normalize_adj (bool):
Set this to true to apply normalization to adjacency; that is,
each output feature will be divided by the number of incoming
neighbors. If normalization is not desired, or if the adjacency
matrix is pre-normalized, set this to False to improve
performance.
Returns:
(torch.FloatTensor):
The output features of each node.
Shape = (batch_size, num_nodes, output_dim)
"""
if adj.type().endswith('sparse.FloatTensor'):
if normalize_adj:
norm = torch.sparse.mm(adj, torch.ones((adj.shape[0], 1),
device=node_feat.device))
result = sparse_bmm(adj, self.linear(node_feat)) / norm
else:
result = sparse_bmm(adj, self.linear(node_feat))
elif normalize_adj:
norm = torch.matmul(adj, torch.ones((adj.shape[0], 1), device=
node_feat.device))
result = torch.matmul(adj, self.linear(node_feat)) / norm
else:
result = torch.matmul(adj, self.linear(node_feat))
if self.self_layer:
result += self.linear_self(node_feat)
return result
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_ones_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_div_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_ones_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
buf0, out=buf1)
del buf0
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (64,
4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
buf4 = buf2
del buf2
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
del primals_5
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_div_1[grid(256)](buf5, buf1, buf4, primals_6,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_6
return buf5, buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (16, 4, 4), (16, 1, 4), 0)
def sparse_bmm(sparse_matrix, dense_matrix_batch):
"""
Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix.
Args:
sparse_matrix (torch.sparse.FloatTensor): Shape = (m, n)
dense_matrix_batch (torch.FloatTensor): Shape = (b, n, p)
Returns:
(torch.FloatTensor):
Result of the batched matrix multiplication. Shape = (b, n, p)
"""
m = sparse_matrix.shape[0]
b, n, p = dense_matrix_batch.shape
dense_matrix = dense_matrix_batch.transpose(0, 1).reshape(n, b * p)
result = torch.sparse.mm(sparse_matrix, dense_matrix)
return result.reshape(m, b, p).transpose(0, 1)
class GraphConvNew(nn.Module):
"""
A simple graph convolution layer, similar to the one defined in Kipf et al.
https://arxiv.org/abs/1609.02907
This operation with self_layer=False is equivalent to :math:`(A H W)` where:
- :math:`H` is the node features with shape (batch_size, num_nodes, input_dim)
- :math:`W` is a weight matrix of shape (input_dim, output_dim)
- :math:`A` is the adjacency matrix of shape (num_nodes, num_nodes).
It can include self-loop.
With normalize_adj=True, it is equivalent to :math:`(D^{-1} A H W)`, where:
- :math:`D` is a diagonal matrix with :math:`D_{ii}` = the sum of the i-th row of A.
In other words, :math:`D` is the incoming degree of each node.
With self_layer=True, it is equivalent to the above plus :math:`(H W_{\\text{self}})`, where:
- :math:`W_{\\text{self}}` is a separate weight matrix to filter each node's self features.
Note that when self_layer is True, A should not include self-loop.
Example:
>>> node_feat = torch.rand(1, 3, 5)
>>> i = torch.LongTensor(
... [[0, 1, 1, 2, 2, 0], [1, 0, 2, 1, 0, 2]])
>>> v = torch.FloatTensor([1, 1, 1, 1, 1, 1])
>>> adj = torch.sparse.FloatTensor(i, v, torch.Size([3, 3]))
>>> model = GraphConv(5, 10)
>>> output = model(node_feat, adj)
>>> # pre-normalize adj
>>> adj = normalize_adj(adj)
>>> output = model(node_feat, adj, normalize_adj=False)
If you use this code, please cite the original paper in addition to Kaolin.
.. code-block::
@article{kipf2016semi,
title={Semi-Supervised Classification with Graph Convolutional Networks},
author={Kipf, Thomas N and Welling, Max},
journal={arXiv preprint arXiv:1609.02907},
year={2016}
}
Args:
input_dim (int): The number of features in each input node.
output_dim (int): The number of features in each output node.
bias (bool): Whether to add bias after the node-wise linear layer.
"""
def __init__(self, input_dim, output_dim, self_layer=True, bias=True):
super(GraphConvNew, self).__init__()
self.self_layer = self_layer
self.linear = nn.Linear(input_dim, output_dim, bias=bias)
if self_layer:
self.linear_self = nn.Linear(input_dim, output_dim, bias=bias)
else:
self.linear_self = None
self.initialize()
def initialize(self):
nn.init.xavier_uniform_(self.linear.weight.data)
if self.linear.bias is not None:
self.linear.bias.data.uniform_(-1.0, 1.0)
if self.self_layer:
nn.init.xavier_uniform_(self.linear_self.weight.data)
if self.linear_self.bias is not None:
self.linear_self.bias.data.uniform_(-1.0, 1.0)
def forward(self, input_0, input_1):
primals_3 = self.linear.weight
primals_4 = self.linear.bias
primals_5 = self.linear_self.weight
primals_6 = self.linear_self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
T0mt0mp/kaolin
|
GraphConv
| false
| 1,106
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
57d1e1478eec8df49dc7cc492f25637cec40399f
|
https://github.com/T0mt0mp/kaolin/tree/57d1e1478eec8df49dc7cc492f25637cec40399f
|
Align
|
import torch
import torch.nn.functional as F
class Align(torch.nn.Module):
def __init__(self, p):
super(Align, self).__init__()
self.p = p
def forward(self, e1, e2):
pred = -torch.norm(e1 - e2, p=self.p, dim=1)
return pred
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_linalg_vector_norm_neg_sub_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = tmp11 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 0.25
tmp24 = libdevice.pow(tmp22, tmp23)
tmp25 = -tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_neg_sub_0[grid(64)](arg0_1,
arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AlignNew(torch.nn.Module):
def __init__(self, p):
super(AlignNew, self).__init__()
self.p = p
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
TMUITLab/EAFR
|
Align
| false
| 1,108
|
[
"MIT"
] | 0
|
dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
MNISTGenerator
|
import torch
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
class MNISTGenerator(nn.Module):
def __init__(self, latent_dim):
super(MNISTGenerator, self).__init__()
self.image_shape = 1, 28, 28
self.latent_dim = latent_dim
self.dense1 = nn.Linear(self.latent_dim, 128, True)
self.dense2 = nn.Linear(128, 784, True)
def forward(self, x):
x = nn.functional.relu(self.dense1(x))
x = nn.functional.sigmoid(self.dense2(x))
return x.view(x.shape[0], *self.image_shape)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'latent_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = tmp3 * tmp5
tl.store(in_out_ptr0 + x2, tmp3, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (784, 128), (128, 1))
assert_size_stride(primals_5, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128),
(1, 4), 0), out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(512)](buf1, primals_2, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (128, 784), (
1, 128), 0), out=buf2)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
triton_poi_fused_sigmoid_sigmoid_backward_1[grid(3136)](buf3,
primals_5, buf4, 3136, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return reinterpret_tensor(buf3, (4, 1, 28, 28), (784, 784, 28, 1), 0
), primals_3, buf1, buf4, primals_4
class MNISTGeneratorNew(nn.Module):
def __init__(self, latent_dim):
super(MNISTGeneratorNew, self).__init__()
self.image_shape = 1, 28, 28
self.latent_dim = latent_dim
self.dense1 = nn.Linear(self.latent_dim, 128, True)
self.dense2 = nn.Linear(128, 784, True)
def forward(self, input_0):
primals_1 = self.dense1.weight
primals_2 = self.dense1.bias
primals_4 = self.dense2.weight
primals_5 = self.dense2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
RobinMaas95/GTSRB_Visualization
|
MNISTGenerator
| false
| 1,109
|
[
"MIT"
] | 0
|
fa837ff94e089a936ef4f4418970d262b35f70b6
|
https://github.com/RobinMaas95/GTSRB_Visualization/tree/fa837ff94e089a936ef4f4418970d262b35f70b6
|
Conv2d
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
class Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
def forward(self, x):
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True
).mean(dim=3, keepdim=True)
weight = weight - weight_mean
std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1
) + 1e-05
weight = weight / std.expand_as(weight)
return F.conv2d(x, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp30 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x2, tmp36, xmask)
@triton.jit
def triton_per_fused_div_mean_std_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tl.where(xmask, tmp11, 0)
tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp17 / tmp19
tmp21 = tmp11 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp25 = tl.where(xmask, tmp23, 0)
tmp26 = tl.sum(tmp25, 1)[:, None]
tmp27 = 63.0
tmp28 = tmp26 / tmp27
tmp29 = libdevice.sqrt(tmp28)
tmp30 = 1e-05
tmp31 = tmp29 + tmp30
tmp32 = tmp10 / tmp31
tl.store(out_ptr0 + (r1 + 64 * x0), tmp10, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp32, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf5 = buf3
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_div_mean_std_sub_1[grid(4)](buf5, primals_1, buf0,
buf1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(16)](buf8, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf8, primals_1, primals_3, buf5, buf6
class Conv2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
T1anZhenYu/pytorch-classification
|
Conv2d
| false
| 1,110
|
[
"MIT"
] | 0
|
ad68e09f20a98541bcb437a7df8e7d14e8c21636
|
https://github.com/T1anZhenYu/pytorch-classification/tree/ad68e09f20a98541bcb437a7df8e7d14e8c21636
|
lovasz_hinge
|
import torch
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from torch.autograd import Variable
def flatten_binary_scores(scores, labels, ignore=255):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts.float() - gt_sorted.float().cumsum(0)
union = gts.float() + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def isnan(x):
return x != x
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
class lovasz_hinge(torch.nn.Module):
def __init__(self, per_img=False, ignore=255):
"""
:param weight: 1D weight vector to deal with the class-imbalance
"""
super().__init__()
self.per_image = per_img
self.ignore = ignore
def lovasz_hinge_flat(self, logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def forward(self, logits, labels):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if self.per_image:
loss = mean(self.lovasz_hinge_flat(*flatten_binary_scores(log.
unsqueeze(0), lab.unsqueeze(0), self.ignore)) for log, lab in
zip(logits, labels))
else:
loss = self.lovasz_hinge_flat(*flatten_binary_scores(logits,
labels, self.ignore))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 255.0
tmp2 = tmp0 != tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256,), (1,), torch.bool)
get_raw_stream(0)
triton_poi_fused_ne_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(arg0_1, (256,), (1,), 0
), buf0, reinterpret_tensor(arg1_1, (256,), (1,), 0)
def flatten_binary_scores(scores, labels, ignore=255):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts.float() - gt_sorted.float().cumsum(0)
union = gts.float() + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def isnan(x):
return x != x
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
class lovasz_hingeNew(torch.nn.Module):
def __init__(self, per_img=False, ignore=255):
"""
:param weight: 1D weight vector to deal with the class-imbalance
"""
super().__init__()
self.per_image = per_img
self.ignore = ignore
def lovasz_hinge_flat(self, logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
PhillipHuang2017/ext_portrait_segmentation
|
lovasz_hinge
| false
| 1,111
|
[
"MIT"
] | 0
|
6d0cec0a953dacbc94a01ea8b719feb687b7c029
|
https://github.com/PhillipHuang2017/ext_portrait_segmentation/tree/6d0cec0a953dacbc94a01ea8b719feb687b7c029
|
AlignEA
|
import torch
import torch.nn.functional as F
class AlignEA(torch.nn.Module):
def __init__(self, p, feat_drop, params):
super(AlignEA, self).__init__()
self.params = params
def forward(self, e1, r, e2):
return torch.sum(torch.pow(e1 + r - e2, 2), 1)
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score - self.params[0]).sum() + self.params[1
] * F.relu(self.params[2] - neg_score).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4, 'feat_drop': 4, 'params': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp5 + tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp12 + tmp18
tmp22 = tmp20 + tmp21
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp19 + tmp25
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, arg2_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class AlignEANew(torch.nn.Module):
def __init__(self, p, feat_drop, params):
super(AlignEANew, self).__init__()
self.params = params
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score - self.params[0]).sum() + self.params[1
] * F.relu(self.params[2] - neg_score).sum()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
TMUITLab/EAFR
|
AlignEA
| false
| 1,112
|
[
"MIT"
] | 0
|
dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
fpn_module
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class fpn_module(nn.Module):
def __init__(self, numClass):
super(fpn_module, self).__init__()
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0
)
self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1,
padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0
)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0
)
self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride=
1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), mode='bilinear', align_corners=True
)
p4 = F.interpolate(p4, size=(H, W), mode='bilinear', align_corners=True
)
p3 = F.interpolate(p3, size=(H, W), mode='bilinear', align_corners=True
)
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners
=True) + y
def forward(self, c2, c3, c4, c5):
p5 = self.toplayer(c5)
p4 = self._upsample_add(p5, self.latlayer1(c4))
p3 = self._upsample_add(p4, self.latlayer2(c3))
p2 = self._upsample_add(p3, self.latlayer3(c2))
p5 = self.smooth1_2(self.smooth1_1(p5))
p4 = self.smooth2_2(self.smooth2_1(p4))
p3 = self.smooth3_2(self.smooth3_1(p3))
p2 = self.smooth4_2(self.smooth4_1(p2))
output = self.classify(self._concatenate(p5, p4, p3, p2))
return output
def get_inputs():
return [torch.rand([4, 256, 64, 64]), torch.rand([4, 512, 64, 64]),
torch.rand([4, 1024, 64, 64]), torch.rand([4, 2048, 64, 64])]
def get_init_inputs():
return [[], {'numClass': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 63, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_3(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = triton_helpers.minimum(tmp9, tmp2)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x6 = xindex
x3 = xindex // 4096 % 256
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + x6, None)
tmp33 = tl.load(in_ptr7 + x3, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x2), None,
eviction_policy='evict_last')
tmp11 = tmp10 + tmp1
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr2 + (tmp13 + 64 * tmp4 + 4096 * x2), None,
eviction_policy='evict_last')
tmp15 = tmp14 - tmp9
tmp17 = tmp15 * tmp16
tmp18 = tmp9 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = tmp19 < 0
tmp22 = tl.where(tmp21, tmp20, tmp19)
tmp23 = tl.load(in_ptr2 + (tmp8 + 64 * tmp22 + 4096 * x2), None,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (tmp13 + 64 * tmp22 + 4096 * x2), None,
eviction_policy='evict_last')
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp16
tmp27 = tmp23 + tmp26
tmp28 = tmp27 - tmp18
tmp30 = tmp28 * tmp29
tmp31 = tmp18 + tmp30
tmp34 = tmp32 + tmp33
tmp35 = tmp31 + tmp34
tl.store(in_out_ptr0 + x6, tmp35, None)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_5(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x2 = xindex // 4096 % 128
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr11 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tl.load(in_ptr8 + (tmp8 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr8 + (tmp15 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp37
tmp41 = tmp40 - tmp38
tmp42 = tmp41 * tmp19
tmp43 = tmp38 + tmp42
tmp44 = tl.load(in_ptr8 + (tmp8 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp45 = tmp44 + tmp37
tmp46 = tl.load(in_ptr8 + (tmp15 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp47 = tmp46 + tmp37
tmp48 = tmp47 - tmp45
tmp49 = tmp48 * tmp19
tmp50 = tmp45 + tmp49
tmp51 = tmp50 - tmp43
tmp52 = tmp51 * tmp34
tmp53 = tl.load(in_ptr10 + (tmp8 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp55 = tmp53 + tmp54
tmp56 = tl.load(in_ptr10 + (tmp15 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp57 = tmp56 + tmp54
tmp58 = tmp57 - tmp55
tmp59 = tmp58 * tmp19
tmp60 = tmp55 + tmp59
tmp61 = tl.load(in_ptr10 + (tmp8 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp62 = tmp61 + tmp54
tmp63 = tl.load(in_ptr10 + (tmp15 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp64 = tmp63 + tmp54
tmp65 = tmp64 - tmp62
tmp66 = tmp65 * tmp19
tmp67 = tmp62 + tmp66
tmp68 = tmp67 - tmp60
tmp69 = tmp68 * tmp34
tl.store(out_ptr0 + x6, tmp21, None)
tl.store(out_ptr1 + x6, tmp35, None)
tl.store(out_ptr2 + x6, tmp43, None)
tl.store(out_ptr3 + x6, tmp52, None)
tl.store(out_ptr4 + x6, tmp60, None)
tl.store(out_ptr5 + x6, tmp69, None)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 512
x0 = xindex % 4096
x2 = xindex // 2097152
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 524288 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 524288 * x2), tmp4, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 256, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 4096 * (-128 + x1) + 524288 * x2),
tmp13, other=0.0)
tmp15 = tl.load(in_ptr3 + (x0 + 4096 * (-128 + x1) + 524288 * x2),
tmp13, other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 384, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + 4096 * (-256 + x1) + 524288 * x2),
tmp22, other=0.0)
tmp24 = tl.load(in_ptr5 + (x0 + 4096 * (-256 + x1) + 524288 * x2),
tmp22, other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 512, tl.int64)
tmp31 = tl.load(in_ptr6 + (x0 + 4096 * (-384 + x1) + 524288 * x2),
tmp28, other=0.0)
tmp32 = tl.load(in_ptr7 + (-384 + x1), tmp28, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tl.store(out_ptr0 + x3, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (256, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 2048, 64, 64), (8388608, 4096, 64, 1))
assert_size_stride(primals_4, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_7, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_10, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_13, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_14, (256,), (1,))
assert_size_stride(primals_15, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_16, (128,), (1,))
assert_size_stride(primals_17, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_18, (256,), (1,))
assert_size_stride(primals_19, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_20, (128,), (1,))
assert_size_stride(primals_21, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_22, (256,), (1,))
assert_size_stride(primals_23, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_24, (128,), (1,))
assert_size_stride(primals_25, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_26, (256,), (1,))
assert_size_stride(primals_27, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (128,), (1,))
assert_size_stride(primals_29, (4, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_30, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(4194304)](buf1, primals_2,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_1[grid(64)](buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_2[grid(64)](buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_1[grid(64)](buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_2[grid(64)](buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_3[grid(64)](buf7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_3[grid(64)](buf9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf10 = buf2
del buf2
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304)
](buf10, buf3, buf5, buf1, buf6, buf7, buf4, buf9, primals_5,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf11 = extern_kernels.convolution(primals_9, primals_7, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf13 = buf11
del buf11
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304)
](buf13, buf3, buf5, buf10, buf6, buf7, buf4, buf9, primals_8,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_8
buf14 = extern_kernels.convolution(primals_12, primals_10, stride=(
1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf16 = buf14
del buf14
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304)
](buf16, buf3, buf5, buf13, buf6, buf7, buf4, buf9, primals_11,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf17 = extern_kernels.convolution(buf1, primals_13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_0[grid(4194304)](buf18, primals_14,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_14
buf19 = extern_kernels.convolution(buf18, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf20 = extern_kernels.convolution(buf10, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_0[grid(4194304)](buf21, primals_18,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_18
buf22 = extern_kernels.convolution(buf21, primals_19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf23 = extern_kernels.convolution(buf13, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_0[grid(4194304)](buf24, primals_22,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_22
buf25 = extern_kernels.convolution(buf24, primals_23, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf26 = extern_kernels.convolution(buf16, primals_25, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_0[grid(4194304)](buf27, primals_26,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_26
buf28 = extern_kernels.convolution(buf27, primals_27, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf29 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf30 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf31 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf32 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf33 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf34 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_sub_5[grid(2097152)
](buf3, buf5, buf19, primals_16, buf6, buf7, buf4, buf9, buf22,
primals_20, buf25, primals_24, buf29, buf30, buf31, buf32,
buf33, buf34, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
del buf19
del buf22
del buf25
del primals_16
del primals_20
del primals_24
buf35 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_6[grid(8388608)](buf29, buf30, buf31, buf32,
buf33, buf34, buf28, primals_28, buf35, 8388608, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf28
del buf29
del buf30
del buf31
del buf32
del buf33
del buf34
del primals_28
buf36 = extern_kernels.convolution(buf35, primals_29, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_7[grid(65536)](buf37, primals_30,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_30
return (buf37, primals_1, primals_3, primals_4, primals_6, primals_7,
primals_9, primals_10, primals_12, primals_13, primals_15,
primals_17, primals_19, primals_21, primals_23, primals_25,
primals_27, primals_29, buf1, buf3, buf4, buf5, buf6, buf7, buf9,
buf10, buf13, buf16, buf18, buf21, buf24, buf27, buf35)
class fpn_moduleNew(nn.Module):
def __init__(self, numClass):
super(fpn_moduleNew, self).__init__()
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0
)
self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1,
padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0
)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0
)
self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride=
1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), mode='bilinear', align_corners=True
)
p4 = F.interpolate(p4, size=(H, W), mode='bilinear', align_corners=True
)
p3 = F.interpolate(p3, size=(H, W), mode='bilinear', align_corners=True
)
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners
=True) + y
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.toplayer.weight
primals_2 = self.toplayer.bias
primals_13 = self.smooth1_1.weight
primals_5 = self.smooth1_1.bias
primals_17 = self.smooth2_1.weight
primals_8 = self.smooth2_1.bias
primals_21 = self.smooth3_1.weight
primals_11 = self.smooth3_1.bias
primals_25 = self.smooth4_1.weight
primals_14 = self.smooth4_1.bias
primals_15 = self.smooth1_2.weight
primals_16 = self.smooth1_2.bias
primals_19 = self.smooth2_2.weight
primals_20 = self.smooth2_2.bias
primals_23 = self.smooth3_2.weight
primals_24 = self.smooth3_2.bias
primals_27 = self.smooth4_2.weight
primals_28 = self.smooth4_2.bias
primals_4 = self.latlayer1.weight
primals_18 = self.latlayer1.bias
primals_7 = self.latlayer2.weight
primals_22 = self.latlayer2.bias
primals_10 = self.latlayer3.weight
primals_26 = self.latlayer3.bias
primals_29 = self.classify.weight
primals_30 = self.classify.bias
primals_12 = input_0
primals_9 = input_1
primals_6 = input_2
primals_3 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0]
|
LOUEY233/CPS3320_python
|
fpn_module
| false
| 1,113
|
[
"MIT"
] | 0
|
3cc1733d91c3a8f680eeb984348e2a52ae3285ec
|
https://github.com/LOUEY233/CPS3320_python/tree/3cc1733d91c3a8f680eeb984348e2a52ae3285ec
|
Bilinear
|
import torch
import torch.nn as nn
class Bilinear(nn.Module):
def __init__(self, size):
super(Bilinear, self).__init__()
self.size = size
self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
self.reset_parameters()
def reset_parameters(self):
params = [p for p in self.parameters() if p.requires_grad]
for i, param in enumerate(params):
param.data.normal_()
def forward(self, vector1, vector2):
bma = torch.matmul(vector1, self.mat).unsqueeze(1)
ba = torch.matmul(bma, vector2.unsqueeze(2)).view(-1, 1)
return ba
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex // 256
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(1024)](primals_3, buf2, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (1024, 1), (1, 1), 0), reinterpret_tensor(
buf2, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (4,
64), (1, 4), 0)
class BilinearNew(nn.Module):
def __init__(self, size):
super(BilinearNew, self).__init__()
self.size = size
self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
self.reset_parameters()
def reset_parameters(self):
params = [p for p in self.parameters() if p.requires_grad]
for i, param in enumerate(params):
param.data.normal_()
def forward(self, input_0, input_1):
primals_1 = self.mat
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
TRUMANCFY/VL-DIORA
|
Bilinear
| false
| 1,114
|
[
"Apache-2.0"
] | 0
|
cef398e05842d4a30345260d8e27d1c362671834
|
https://github.com/TRUMANCFY/VL-DIORA/tree/cef398e05842d4a30345260d8e27d1c362671834
|
N_TransE
|
import torch
import torch.nn.functional as F
class N_TransE(torch.nn.Module):
def __init__(self, p, params):
super(N_TransE, self).__init__()
self.p = p
self.params = params
def forward(self, e1, r, e2):
pred = -torch.norm(e1 + r - e2, p=self.p, dim=1)
return pred
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score + self.params[0] - neg_score).sum(
) + self.params[1] * F.relu(pos_score - self.params[2]).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4, 'params': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_linalg_vector_norm_neg_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tmp5 * tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp28 * tmp28
tmp30 = tmp22 + tmp29
tmp31 = 0.25
tmp32 = libdevice.pow(tmp30, tmp31)
tmp33 = -tmp32
tl.store(in_out_ptr0 + x2, tmp33, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_linalg_vector_norm_neg_sub_0[grid(64)](buf1,
arg0_1, arg1_1, arg2_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class N_TransENew(torch.nn.Module):
def __init__(self, p, params):
super(N_TransENew, self).__init__()
self.p = p
self.params = params
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score + self.params[0] - neg_score).sum(
) + self.params[1] * F.relu(pos_score - self.params[2]).sum()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
TMUITLab/EAFR
|
N_TransE
| false
| 1,115
|
[
"MIT"
] | 0
|
dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
FM
|
import torch
import torch.nn as nn
from sklearn.metrics import *
class FM(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
References
- [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
"""
def __init__(self):
super(FM, self).__init__()
def forward(self, inputs):
fm_input = inputs
square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
cross_term = square_of_sum - sum_of_square
cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
return cross_term
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp0 * tmp0
tmp9 = tmp1 * tmp1
tmp10 = tmp8 + tmp9
tmp11 = tmp3 * tmp3
tmp12 = tmp10 + tmp11
tmp13 = tmp5 * tmp5
tmp14 = tmp12 + tmp13
tmp15 = tmp7 - tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp16 * tmp16
tmp25 = tmp17 * tmp17
tmp26 = tmp24 + tmp25
tmp27 = tmp19 * tmp19
tmp28 = tmp26 + tmp27
tmp29 = tmp21 * tmp21
tmp30 = tmp28 + tmp29
tmp31 = tmp23 - tmp30
tmp32 = tmp15 + tmp31
tmp35 = tmp33 + tmp34
tmp37 = tmp35 + tmp36
tmp39 = tmp37 + tmp38
tmp40 = tmp39 * tmp39
tmp41 = tmp33 * tmp33
tmp42 = tmp34 * tmp34
tmp43 = tmp41 + tmp42
tmp44 = tmp36 * tmp36
tmp45 = tmp43 + tmp44
tmp46 = tmp38 * tmp38
tmp47 = tmp45 + tmp46
tmp48 = tmp40 - tmp47
tmp49 = tmp32 + tmp48
tmp52 = tmp50 + tmp51
tmp54 = tmp52 + tmp53
tmp56 = tmp54 + tmp55
tmp57 = tmp56 * tmp56
tmp58 = tmp50 * tmp50
tmp59 = tmp51 * tmp51
tmp60 = tmp58 + tmp59
tmp61 = tmp53 * tmp53
tmp62 = tmp60 + tmp61
tmp63 = tmp55 * tmp55
tmp64 = tmp62 + tmp63
tmp65 = tmp57 - tmp64
tmp66 = tmp49 + tmp65
tmp67 = 0.5
tmp68 = tmp66 * tmp67
tl.store(in_out_ptr0 + x2, tmp68, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf1,
class FMNew(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
References
- [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
"""
def __init__(self):
super(FMNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Sunmyunghan/Final_Project
|
FM
| false
| 1,117
|
[
"MIT"
] | 0
|
28cde293dc6d07521b2e1c5613b20444aea91d21
|
https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21
|
VertexDirectEmbedder
|
import torch
import torch.utils.data
from torch import nn
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=
True), min=epsilon)
class VertexDirectEmbedder(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedder, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
def forward(self) ->torch.Tensor:
"""
Produce vertex embeddings, a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Return:
Full vertex embeddings, a tensor of shape [N, D]
"""
return normalize_embeddings(self.embeddings)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def get_inputs():
return []
def get_init_inputs():
return [[], {'num_vertices': 4, 'embed_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(16)](primals_1,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf0, primals_1
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=
True), min=epsilon)
class VertexDirectEmbedderNew(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedderNew, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def forward(self):
primals_1 = self.embeddings
output = call([primals_1])
return output[0]
|
TWJianNuo/detectron2
|
VertexDirectEmbedder
| false
| 1,118
|
[
"Apache-2.0"
] | 0
|
091bc43e85b8f7cefdccebf8d85afb7cfff2a3f0
|
https://github.com/TWJianNuo/detectron2/tree/091bc43e85b8f7cefdccebf8d85afb7cfff2a3f0
|
HighWay
|
import torch
import torch.nn as nn
from torch.nn import Parameter
class HighWay(torch.nn.Module):
def __init__(self, f_in, f_out, bias=True):
super(HighWay, self).__init__()
self.w = Parameter(torch.Tensor(f_in, f_out))
nn.init.xavier_uniform_(self.w)
if bias:
self.bias = Parameter(torch.Tensor(f_out))
nn.init.constant_(self.bias, 0)
else:
self.register_parameter('bias', None)
def forward(self, in_1, in_2):
t = torch.mm(in_1, self.w)
if self.bias is not None:
t = t + self.bias
gate = torch.sigmoid(t)
return gate * in_2 + (1.0 - gate) * in_1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'f_in': 4, 'f_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp8 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp3
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_0[grid(16)](buf0, primals_3,
primals_4, primals_2, buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf1, primals_2, primals_3, primals_4, buf0
class HighWayNew(torch.nn.Module):
def __init__(self, f_in, f_out, bias=True):
super(HighWayNew, self).__init__()
self.w = Parameter(torch.Tensor(f_in, f_out))
nn.init.xavier_uniform_(self.w)
if bias:
self.bias = Parameter(torch.Tensor(f_out))
nn.init.constant_(self.bias, 0)
else:
self.register_parameter('bias', None)
def forward(self, input_0, input_1):
primals_1 = self.w
primals_3 = self.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
TMUITLab/EAFR
|
HighWay
| false
| 1,119
|
[
"MIT"
] | 0
|
dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
Network
|
import torch
import torch.nn as nn
from torch.nn.functional import relu
from torch.nn.functional import softmax
class Network(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.fc1 = nn.Linear(in_features=input_size, out_features=64)
self.fc2 = nn.Linear(in_features=64, out_features=64)
self.out = nn.Linear(in_features=64, out_features=output_size)
def forward(self, t):
t = relu(self.fc1(t))
t = relu(self.fc2(t))
t = softmax(self.out(t), dim=0)
return t
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf8, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
primals_5, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 64), (64, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class NetworkNew(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.fc1 = nn.Linear(in_features=input_size, out_features=64)
self.fc2 = nn.Linear(in_features=64, out_features=64)
self.out = nn.Linear(in_features=64, out_features=output_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.out.weight
primals_7 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
THE-RAF/Reinforcement-Learning
|
Network
| false
| 1,120
|
[
"MIT"
] | 0
|
36b4c5330740b533fb8170263f995afb91a1d021
|
https://github.com/THE-RAF/Reinforcement-Learning/tree/36b4c5330740b533fb8170263f995afb91a1d021
|
SpatialCrossMapLRN
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class SpatialCrossMapLRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
ACROSS_CHANNELS=True):
super(SpatialCrossMapLRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(self.k).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(self.k).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SpatialCrossMapLRNNew(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
ACROSS_CHANNELS=True):
super(SpatialCrossMapLRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Tagussan/pretrained-models.pytorch
|
SpatialCrossMapLRN
| false
| 1,121
|
[
"BSD-3-Clause"
] | 0
|
854e6c153c2534dd7cf76a5ec102307ea5171167
|
https://github.com/Tagussan/pretrained-models.pytorch/tree/854e6c153c2534dd7cf76a5ec102307ea5171167
|
MLPBase
|
import torch
from torch import nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_outputs': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (4, 300), (300, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf7, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf6, 19200, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(300, 4), (1, 300), 0), alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), buf4, primals_6, buf6, primals_4, buf7
class MLPBaseNew(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBaseNew, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
TachikakaMin/dreamer-torch
|
MLPBase
| false
| 1,122
|
[
"MIT"
] | 0
|
3c99526f4507e28cf8b34ada0321001adcf8ae1f
|
https://github.com/TachikakaMin/dreamer-torch/tree/3c99526f4507e28cf8b34ada0321001adcf8ae1f
|
N_R_Align
|
import torch
import torch.nn as nn
class N_R_Align(torch.nn.Module):
def __init__(self, params):
super(N_R_Align, self).__init__()
self.params = params
self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06)
def forward(self, e1, e2, n1, n2):
return self.params * torch.sigmoid(self.cos_sim(n1, n2)) + (1 -
self.params) * torch.sigmoid(self.cos_sim(e1, e2))
def loss(self, pos_score, neg_score, target):
return -torch.log(pos_score).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'params': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x3, xmask)
tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = 4.0
tmp9 = tmp7 * tmp8
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp18 = -3.0
tmp19 = tmp17 * tmp18
tmp20 = tmp9 + tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg3_1, arg2_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del arg3_1
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sum_1[grid(64)](buf0, buf1, buf2,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
return buf2,
class N_R_AlignNew(torch.nn.Module):
def __init__(self, params):
super(N_R_AlignNew, self).__init__()
self.params = params
self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06)
def loss(self, pos_score, neg_score, target):
return -torch.log(pos_score).sum()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
TMUITLab/EAFR
|
N_R_Align
| false
| 1,123
|
[
"MIT"
] | 0
|
dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
|
FC
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FC(nn.Module):
def __init__(self, in_channels, out_channels, use_bias=False,
activation='LR', gain=2 ** 0.5):
super(FC, self).__init__()
self.he_std = in_channels * -0.5 * gain
self.weight = torch.nn.Parameter(torch.randn(out_channels,
in_channels) * self.he_std)
if use_bias:
self.bias = torch.nn.Parameter(torch.zeros(out_channels))
else:
self.bias = None
if activation == 'LR':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'Sigmoid':
self.activation = nn.Sigmoid()
elif activation is None:
self.activation = None
else:
assert 0, " STGAN's FC reruires LR or Sigmoid, not{}".format(
activation)
def forward(self, x):
if self.bias is not None:
out = F.linear(x, self.weight, self.bias)
else:
out = F.linear(x, self.weight)
if self.activation:
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = tmp5 > tmp1
tl.store(out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr1 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_view_0[grid(256)](buf1,
buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3
class FCNew(nn.Module):
def __init__(self, in_channels, out_channels, use_bias=False,
activation='LR', gain=2 ** 0.5):
super(FCNew, self).__init__()
self.he_std = in_channels * -0.5 * gain
self.weight = torch.nn.Parameter(torch.randn(out_channels,
in_channels) * self.he_std)
if use_bias:
self.bias = torch.nn.Parameter(torch.zeros(out_channels))
else:
self.bias = None
if activation == 'LR':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'Sigmoid':
self.activation = nn.Sigmoid()
elif activation is None:
self.activation = None
else:
assert 0, " STGAN's FC reruires LR or Sigmoid, not{}".format(
activation)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
TOMeoww/STGAN
|
FC
| false
| 1,124
|
[
"MIT"
] | 0
|
090a4024999e68f017140312ecfdd0d4dc3dc425
|
https://github.com/TOMeoww/STGAN/tree/090a4024999e68f017140312ecfdd0d4dc3dc425
|
Mean
|
import torch
class Mean(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
_std, mean = torch.std_mean(x, self.dim)
return mean
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_std_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
get_raw_stream(0)
triton_poi_fused_std_mean_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class MeanNew(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Tahlor/glom-pytorch
|
Mean
| false
| 1,125
|
[
"MIT"
] | 0
|
45b2fc52af5288cd53611e497a70d53ffa303410
|
https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410
|
LinearModel
|
import torch
class LinearModel(torch.nn.Module):
def __init__(self, input_size: 'int', output_size: 'int', dropout: 'float'
):
super().__init__()
self.linear = torch.nn.Linear(input_size, output_size)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, data):
data = data[:, 0, :]
hidden = self.dropout(data)
output = self.linear(hidden)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
return reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
class LinearModelNew(torch.nn.Module):
def __init__(self, input_size: 'int', output_size: 'int', dropout: 'float'
):
super().__init__()
self.linear = torch.nn.Linear(input_size, output_size)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
TDteach/SEAM
|
LinearModel
| false
| 1,126
|
[
"MIT"
] | 0
|
231447dad15403e7620adcf6629b6e7fccc4b809
|
https://github.com/TDteach/SEAM/tree/231447dad15403e7620adcf6629b6e7fccc4b809
|
GeometricMean
|
import torch
import torch.nn.functional as F
class GeometricMean(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
log_x = torch.log(F.relu(x))
return torch.exp(torch.mean(log_x, dim=self.dim))
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_log_mean_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl_math.log(tmp2)
tmp5 = triton_helpers.maximum(tmp1, tmp4)
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 + tmp6
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = tl_math.log(tmp9)
tmp11 = tmp7 + tmp10
tmp13 = triton_helpers.maximum(tmp1, tmp12)
tmp14 = tl_math.log(tmp13)
tmp15 = tmp11 + tmp14
tmp16 = 4.0
tmp17 = tmp15 / tmp16
tmp18 = tl_math.exp(tmp17)
tl.store(out_ptr0 + x0, tmp18, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_log_mean_relu_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GeometricMeanNew(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Tahlor/glom-pytorch
|
GeometricMean
| false
| 1,127
|
[
"MIT"
] | 0
|
45b2fc52af5288cd53611e497a70d53ffa303410
|
https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410
|
MinibatchStd
|
import torch
import torch.nn as nn
class MinibatchStd(nn.Module):
"""
calculate minibatch std to avoid mode collapse
"""
def __init__(self):
super(MinibatchStd, self).__init__()
def forward(self, x):
size = list(x.size())
size[1] = 1
std = torch.std(x, dim=0)
mean = torch.mean(std)
return torch.cat((x, mean.repeat(size)), dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_repeat_std_0(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = 3.0
tmp21 = tmp19 / tmp20
tmp22 = libdevice.sqrt(tmp21)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp25 = tl.sum(tmp23, 1)[:, None]
tmp26 = 64.0
tmp27 = tmp25 / tmp26
tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
tmp27, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64)
get_raw_stream(0)
triton_per_fused_mean_repeat_std_0[grid(1)](arg0_1, buf2, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf3,
class MinibatchStdNew(nn.Module):
"""
calculate minibatch std to avoid mode collapse
"""
def __init__(self):
super(MinibatchStdNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Tak-jae-ho/RGBD-GAN-pytorch
|
MinibatchStd
| false
| 1,128
|
[
"MIT"
] | 0
|
4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
|
https://github.com/Tak-jae-ho/RGBD-GAN-pytorch/tree/4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
|
PixelwiseNorm
|
import torch
import torch.nn as nn
class PixelwiseNorm(nn.Module):
"""
layer pixelwise normalization
"""
def __init__(self, eps=1e-07):
super(PixelwiseNorm, self).__init__()
self.eps = eps
def forward(self, x):
return x / torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) + self.eps
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1e-07
tmp13 = tmp11 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_pow_sqrt_sum_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PixelwiseNormNew(nn.Module):
"""
layer pixelwise normalization
"""
def __init__(self, eps=1e-07):
super(PixelwiseNormNew, self).__init__()
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Tak-jae-ho/RGBD-GAN-pytorch
|
PixelwiseNorm
| false
| 1,129
|
[
"MIT"
] | 0
|
4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
|
https://github.com/Tak-jae-ho/RGBD-GAN-pytorch/tree/4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
|
ConsensusAttention
|
import torch
import torch.nn.functional as F
from torch import nn
from torch import einsum
class ConsensusAttention(nn.Module):
def __init__(self, num_patches_side, attend_self=True,
local_consensus_radius=0):
super().__init__()
self.attend_self = attend_self
self.local_consensus_radius = local_consensus_radius
if self.local_consensus_radius > 0:
coors = torch.stack(torch.meshgrid(torch.arange(
num_patches_side), torch.arange(num_patches_side))).float()
coors = rearrange(coors, 'c h w -> (h w) c')
dist = torch.cdist(coors, coors)
mask_non_local = dist > self.local_consensus_radius
mask_non_local = rearrange(mask_non_local, 'i j -> () i j')
self.register_buffer('non_local_mask', mask_non_local)
def forward(self, levels):
_, n, _, d, device = *levels.shape, levels.device
q, k, _v = levels, F.normalize(levels, dim=-1), levels
sim = einsum('b i l d, b j l d -> b l i j', q, k
) * d ** -0.5
if not self.attend_self:
self_mask = torch.eye(n, device=device, dtype=torch.bool)
self_mask = rearrange(self_mask, 'i j -> () () i j')
sim.masked_fill_(self_mask, TOKEN_ATTEND_SELF_VALUE)
if self.local_consensus_radius > 0:
max_neg_value = -torch.finfo(sim.dtype).max
sim.masked_fill_(self.non_local_mask, max_neg_value)
attn = sim.softmax(dim=-1)
out = einsum('b l i j, b j l d -> b i l d', attn, levels)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_patches_side': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
tl.store(out_ptr1 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = yindex // 16
y4 = yindex % 16
y1 = yindex // 4 % 4
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y4 + 16 * x3 + 64 * y2), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * y1 + 16 * x3 + 64 * y2), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * y1 + 16 * x3 + 64 * y2), xmask &
ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * y1 + 16 * x3 + 64 * y2), xmask &
ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * y1 + 16 * x3 + 64 * y2), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3 + 4 * y5), tmp15, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, buf5, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
triton_poi_fused_clone_1[grid(64, 4)](arg0_1, buf1, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
del buf0
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_3[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
del buf4
del buf5
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 4, 16, 1), 0),
class ConsensusAttentionNew(nn.Module):
def __init__(self, num_patches_side, attend_self=True,
local_consensus_radius=0):
super().__init__()
self.attend_self = attend_self
self.local_consensus_radius = local_consensus_radius
if self.local_consensus_radius > 0:
coors = torch.stack(torch.meshgrid(torch.arange(
num_patches_side), torch.arange(num_patches_side))).float()
coors = rearrange(coors, 'c h w -> (h w) c')
dist = torch.cdist(coors, coors)
mask_non_local = dist > self.local_consensus_radius
mask_non_local = rearrange(mask_non_local, 'i j -> () i j')
self.register_buffer('non_local_mask', mask_non_local)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Tahlor/glom-pytorch
|
ConsensusAttention
| false
| 1,130
|
[
"MIT"
] | 0
|
45b2fc52af5288cd53611e497a70d53ffa303410
|
https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410
|
DenseCrossEntropy
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
class DenseCrossEntropy(nn.Module):
def forward(self, x, target):
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
loss = -logprobs * target
loss = loss.sum(-1)
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp13 = -tmp12
tmp15 = tmp13 * tmp14
tmp16 = tmp2 - tmp11
tmp17 = -tmp16
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tmp21 = tmp5 - tmp11
tmp22 = -tmp21
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp8 - tmp11
tmp27 = -tmp26
tmp29 = tmp27 * tmp28
tmp30 = tmp25 + tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = 64.0
tmp35 = tmp33 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3,
buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
class DenseCrossEntropyNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution
|
DenseCrossEntropy
| false
| 1,131
|
[
"Apache-2.0"
] | 0
|
8e2d9056d5c88c6415827086809e73522b336fbb
|
https://github.com/Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution/tree/8e2d9056d5c88c6415827086809e73522b336fbb
|
HalfMSELoss
|
import torch
from torch.nn.modules.loss import MSELoss
class HalfMSELoss(MSELoss):
def __init__(self, reduction='mean'):
super().__init__(reduction=reduction)
def forward(self, input, target):
return super().forward(input, target) / 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import MSELoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class HalfMSELossNew(MSELoss):
def __init__(self, reduction='mean'):
super().__init__(reduction=reduction)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ThayaFluss/candle
|
HalfMSELoss
| false
| 1,132
|
[
"MIT"
] | 0
|
4a12fde60ffbbf0cb688617fee81aded94c0b613
|
https://github.com/ThayaFluss/candle/tree/4a12fde60ffbbf0cb688617fee81aded94c0b613
|
EqualLinear
|
import torch
from torch import nn
import torch.nn.functional as F
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias=self.bias *
self.lr_mul)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class EqualLinearNew(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
TheSignPainter/AGGAN
|
EqualLinear
| false
| 1,133
|
[
"Apache-2.0"
] | 0
|
d75144f81df3f5a0a761d48c6285c38e74002be3
|
https://github.com/TheSignPainter/AGGAN/tree/d75144f81df3f5a0a761d48c6285c38e74002be3
|
SuperPointNet
|
import torch
import torch.optim
import torch.utils.data
class SuperPointNet(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super(SuperPointNet, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1,
padding=0)
self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1,
padding=0)
def forward(self, x):
""" Forward pass that jointly computes unprocessed point and descriptor
tensors.
Input
x: Image pytorch tensor shaped N x 1 x H x W.
Output
semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8.
desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8.
"""
x = self.relu(self.conv1a(x))
x = self.relu(self.conv1b(x))
x = self.pool(x)
x = self.relu(self.conv2a(x))
x = self.relu(self.conv2b(x))
x = self.pool(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
cPa = self.relu(self.convPa(x))
semi = self.convPb(cPa)
cDa = self.relu(self.convDa(x))
desc = self.convDb(cDa)
dn = torch.norm(desc, p=2, dim=1)
desc = desc.div(torch.unsqueeze(dn, 1))
return semi, desc
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp4, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 16
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 8
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 256 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2176 + x0 + 256 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_13(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 260
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 65
y1 = yindex // 65
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 65 * x2 + 4160 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused_convolution_linalg_vector_norm_14(in_out_ptr0,
in_out_ptr1, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = libdevice.sqrt(tmp6)
tl.store(in_out_ptr0 + (r1 + 256 * x0), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_div_15(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 16384 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 64 * y1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (65, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_21, (65,), (1,))
assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_25, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4096, 9)](primals_4, buf0, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_6, buf1, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_8, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_1[grid(8192, 9)](primals_10, buf3, 8192, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf7 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_3[grid(32768, 9)](primals_18, buf7, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf8 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_3[grid(32768, 9)](primals_22, buf8, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf9 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.float32)
triton_poi_fused_convolution_relu_4[grid(256, 4096)](buf9,
primals_2, buf10, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf9
del primals_2
buf11 = extern_kernels.convolution(buf10, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_5[grid(1048576)](buf12, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(262144)](buf12,
buf13, buf14, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
buf15 = extern_kernels.convolution(buf13, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf16 = buf15
del buf15
triton_poi_fused_convolution_relu_7[grid(262144)](buf16, primals_7,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_7[grid(262144)](buf18, primals_9,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
buf20 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(65536)](buf18,
buf19, buf20, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_9[grid(131072)](buf22, primals_11,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_9[grid(131072)](buf24, primals_13,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
buf26 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(32768)](buf24,
buf25, buf26, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf28 = buf27
del buf27
triton_poi_fused_convolution_relu_11[grid(32768)](buf28, primals_15,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf30 = buf29
del buf29
triton_poi_fused_convolution_relu_11[grid(32768)](buf30, primals_17,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf32 = buf31
del buf31
triton_poi_fused_convolution_relu_12[grid(65536)](buf32, primals_19,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
buf33 = extern_kernels.convolution(buf32, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 65, 8, 8), (4160, 1, 520, 65))
buf34 = empty_strided_cuda((4, 65, 8, 8), (4160, 64, 8, 1), torch.
float32)
triton_poi_fused_convolution_13[grid(260, 64)](buf33, primals_21,
buf34, 260, 64, XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1)
del buf33
del primals_21
buf35 = extern_kernels.convolution(buf30, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf36 = buf35
del buf35
triton_poi_fused_convolution_relu_12[grid(65536)](buf36, primals_23,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_23
buf37 = extern_kernels.convolution(buf36, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf38 = buf37
del buf37
buf39 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
buf40 = buf39
del buf39
triton_per_fused_convolution_linalg_vector_norm_14[grid(256)](buf38,
buf40, primals_25, 256, 256, num_warps=2, num_stages=1)
del primals_25
buf41 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_div_15[grid(1024, 64)](buf38, buf40, buf41, 1024,
64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
return (buf34, buf41, primals_1, primals_3, buf0, buf1, buf2, buf3,
buf4, buf5, buf6, buf7, primals_20, buf8, primals_24, buf10, buf12,
buf13, buf14, buf16, buf18, buf19, buf20, buf22, buf24, buf25,
buf26, buf28, buf30, buf32, buf36, buf38, reinterpret_tensor(buf40,
(4, 1, 8, 8), (64, 64, 8, 1), 0))
class SuperPointNetNew(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super(SuperPointNetNew, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1,
padding=0)
self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1,
padding=0)
def forward(self, input_0):
primals_1 = self.conv1a.weight
primals_2 = self.conv1a.bias
primals_4 = self.conv1b.weight
primals_5 = self.conv1b.bias
primals_6 = self.conv2a.weight
primals_7 = self.conv2a.bias
primals_8 = self.conv2b.weight
primals_9 = self.conv2b.bias
primals_10 = self.conv3a.weight
primals_11 = self.conv3a.bias
primals_12 = self.conv3b.weight
primals_13 = self.conv3b.bias
primals_14 = self.conv4a.weight
primals_15 = self.conv4a.bias
primals_16 = self.conv4b.weight
primals_17 = self.conv4b.bias
primals_18 = self.convPa.weight
primals_19 = self.convPa.bias
primals_20 = self.convPb.weight
primals_21 = self.convPb.bias
primals_22 = self.convDa.weight
primals_23 = self.convDa.bias
primals_24 = self.convDb.weight
primals_25 = self.convDb.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25])
return output[0], output[1]
|
Sunny-Qin-0314/pytorch-superpoint
|
SuperPointNet
| false
| 1,134
|
[
"MIT"
] | 0
|
5c5325a1e5917afcc7469e137206990a8cd33725
|
https://github.com/Sunny-Qin-0314/pytorch-superpoint/tree/5c5325a1e5917afcc7469e137206990a8cd33725
|
ArcMarginProduct_subcenter
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.nn.parallel
class ArcMarginProduct_subcenter(nn.Module):
def __init__(self, in_features, out_features, k=3):
super().__init__()
self.weight = nn.Parameter(torch.FloatTensor(out_features * k,
in_features))
self.reset_parameters()
self.k = k
self.out_features = out_features
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, features):
cosine_all = F.linear(F.normalize(features), F.normalize(self.weight))
cosine_all = cosine_all.view(-1, self.out_features, self.k)
cosine, _ = torch.max(cosine_all, dim=2)
return cosine
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_max_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 > tmp1
tmp6 = tmp0 == tmp1
tmp7 = tmp0 != tmp0
tmp8 = tmp1 != tmp1
tmp9 = tmp7 > tmp8
tmp10 = tmp5 | tmp9
tmp11 = tmp7 & tmp8
tmp12 = tmp6 | tmp11
tmp13 = tl.full([1], 0, tl.int64)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp13 < tmp14
tmp16 = tmp12 & tmp15
tmp17 = tmp10 | tmp16
tmp18 = tl.where(tmp17, tmp0, tmp1)
tmp19 = tl.where(tmp17, tmp13, tmp14)
tmp20 = tmp18 > tmp3
tmp21 = tmp18 == tmp3
tmp22 = tmp18 != tmp18
tmp23 = tmp3 != tmp3
tmp24 = tmp22 > tmp23
tmp25 = tmp20 | tmp24
tmp26 = tmp22 & tmp23
tmp27 = tmp21 | tmp26
tmp28 = tl.full([1], 2, tl.int64)
tmp29 = tmp19 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tmp25 | tmp30
tl.where(tmp31, tmp18, tmp3)
tmp33 = tl.where(tmp31, tmp19, tmp28)
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp33, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(48)](primals_2, buf1, 48, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 12), (1, 4), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.int64)
triton_poi_fused_max_2[grid(256)](buf2, buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf3, primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 1), 0)
class ArcMarginProduct_subcenterNew(nn.Module):
def __init__(self, in_features, out_features, k=3):
super().__init__()
self.weight = nn.Parameter(torch.FloatTensor(out_features * k,
in_features))
self.reset_parameters()
self.k = k
self.out_features = out_features
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution
|
ArcMarginProduct_subcenter
| false
| 1,135
|
[
"Apache-2.0"
] | 0
|
8e2d9056d5c88c6415827086809e73522b336fbb
|
https://github.com/Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution/tree/8e2d9056d5c88c6415827086809e73522b336fbb
|
ChannelMixer
|
import torch
import torch.nn as nn
class ChannelMixer(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super(ChannelMixer, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, x):
input = x
x = self.fc1(x)
x = self.activation(x)
if self.dropout is not None:
x = self.dropout(x)
x = self.fc2(x)
if self.dropout is not None:
x = self.dropout(x)
x = x + input
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_5, primals_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class ChannelMixerNew(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super(ChannelMixerNew, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
TheRealMarVin/mlp-mixer
|
ChannelMixer
| false
| 1,136
|
[
"MIT"
] | 0
|
2124cb5c5adfc7af473cab535095471d4943adab
|
https://github.com/TheRealMarVin/mlp-mixer/tree/2124cb5c5adfc7af473cab535095471d4943adab
|
Net
|
import torch
from torch import nn
from torch.nn import functional as F
class Net(nn.Module):
def __init__(self, obs_dim, act_dim):
super(Net, self).__init__()
self.fc0 = nn.Linear(obs_dim, 128)
self.fc1 = nn.Linear(128, act_dim)
def forward(self, x):
x = x.type_as(self.fc0.bias)
x = F.relu(self.fc0(x))
x = self.fc1(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'obs_dim': 4, 'act_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (128, 4), (4, 1))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), out=buf0)
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf3, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3
class NetNew(nn.Module):
def __init__(self, obs_dim, act_dim):
super(NetNew, self).__init__()
self.fc0 = nn.Linear(obs_dim, 128)
self.fc1 = nn.Linear(128, act_dim)
def forward(self, input_0):
primals_3 = self.fc0.weight
primals_2 = self.fc0.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
TommeyChang/CS294-Homework
|
Net
| false
| 1,137
|
[
"MIT"
] | 0
|
17b525bf4366034b45c4febd89f1053d44550237
|
https://github.com/TommeyChang/CS294-Homework/tree/17b525bf4366034b45c4febd89f1053d44550237
|
ActorDownAction
|
import torch
from torch import nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class ActorDownAction(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs action"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_action,
max_children):
super(ActorDownAction, self).__init__()
self.max_action = max_action
self.action_base = MLPBase(self_input_dim + msg_dim, action_dim)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def forward(self, x, m):
xm = torch.cat((x, m), dim=-1)
xm = torch.tanh(xm)
action = self.max_action * torch.tanh(self.action_base(xm))
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return action, msg_down
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'self_input_dim': 4, 'action_dim': 4, 'msg_dim': 4,
'max_action': 4, 'max_children': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = libdevice.tanh(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (4, 300), (300, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (400, 8), (8, 1))
assert_size_stride(primals_10, (400,), (1,))
assert_size_stride(primals_11, (300, 400), (400, 1))
assert_size_stride(primals_12, (300,), (1,))
assert_size_stride(primals_13, (16, 300), (300, 1))
assert_size_stride(primals_14, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf1
buf20 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2,
primals_4, buf20, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3,
primals_6, buf4, buf19, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_6
buf5 = buf3
del buf3
triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200, XBLOCK
=128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7,
(300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_4[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8)
del primals_9
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf8
buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9,
primals_10, buf18, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4
del buf4
buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10,
primals_12, buf11, buf17, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_12
buf12 = buf10
del buf10
triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200,
XBLOCK=128, num_warps=4, num_stages=1)
del buf11
buf13 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(
primals_13, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf13)
del primals_14
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf15 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf14
buf16 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
triton_per_fused_div_linalg_vector_norm_5[grid(64)](buf15, buf13,
buf16, 64, 16, XBLOCK=32, num_warps=4, num_stages=1)
return (buf7, buf16, reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, buf6,
reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, buf13,
buf15, primals_13, buf17, primals_11, buf18, primals_7, buf19,
primals_5, buf20)
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class ActorDownActionNew(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs action"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_action,
max_children):
super(ActorDownActionNew, self).__init__()
self.max_action = max_action
self.action_base = MLPBase(self_input_dim + msg_dim, action_dim)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def forward(self, input_0, input_1):
primals_3 = self.action_base.l1.weight
primals_4 = self.action_base.l1.bias
primals_5 = self.action_base.l2.weight
primals_6 = self.action_base.l2.bias
primals_7 = self.action_base.l3.weight
primals_8 = self.action_base.l3.bias
primals_9 = self.msg_base.l1.weight
primals_10 = self.msg_base.l1.bias
primals_11 = self.msg_base.l2.weight
primals_12 = self.msg_base.l2.bias
primals_13 = self.msg_base.l3.weight
primals_14 = self.msg_base.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
|
TachikakaMin/dreamer-torch
|
ActorDownAction
| false
| 1,138
|
[
"MIT"
] | 0
|
3c99526f4507e28cf8b34ada0321001adcf8ae1f
|
https://github.com/TachikakaMin/dreamer-torch/tree/3c99526f4507e28cf8b34ada0321001adcf8ae1f
|
UpsampleConvLayer
|
import torch
import torch.nn as nn
class UpsampleConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
scale_factor):
super(UpsampleConvLayer, self).__init__()
self._scale_factor = scale_factor
self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x = nn.functional.interpolate(x, mode='nearest', scale_factor=self.
_scale_factor)
x = self._reflection_pad(x)
x = self._conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'scale_factor': 1.0}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x5 = xindex
tmp0 = 3 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x1))
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = 3 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x0))
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_reflection_pad2d_0[grid(1024)](primals_1
, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class UpsampleConvLayerNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
scale_factor):
super(UpsampleConvLayerNew, self).__init__()
self._scale_factor = scale_factor
self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, input_0):
primals_1 = self._conv.weight
primals_3 = self._conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ThomasRanvier/cnn_style_transfer
|
UpsampleConvLayer
| false
| 1,139
|
[
"MIT"
] | 0
|
90b6c76c20263c22f4e45184d572284726ecbd7b
|
https://github.com/ThomasRanvier/cnn_style_transfer/tree/90b6c76c20263c22f4e45184d572284726ecbd7b
|
StructuralProbe
|
import torch
import torch.nn as nn
import torch.utils.data.dataloader
class StructuralProbe(nn.Module):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, device):
super().__init__()
self.probe_rank = rank
self.model_dim = model_dim
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.
probe_rank))
nn.init.uniform_(self.proj, -0.05, 0.05)
self
def forward(self, batch):
""" Computes all n^2 pairs of distances after projection
for each sentence in a batch.
Note that due to padding, some distances will be non-zero for pads.
Computes (B(h_i-h_j))^T(B(h_i-h_j)) for all i,j
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of distances of shape (batch_size, max_seq_len, max_seq_len)
"""
transformed = torch.matmul(batch, self.proj)
_batchlen, seqlen, _rank = transformed.size()
transformed = transformed.unsqueeze(2)
transformed = transformed.expand(-1, -1, seqlen, -1)
transposed = transformed.transpose(1, 2)
diffs = transformed - transposed
squared_diffs = diffs.pow(2)
squared_distances = torch.sum(squared_diffs, -1)
return squared_distances
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4, 'rank': 4, 'device': 0}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x4, tmp18, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf1, buf0, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0)
class StructuralProbeNew(nn.Module):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, device):
super().__init__()
self.probe_rank = rank
self.model_dim = model_dim
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.
probe_rank))
nn.init.uniform_(self.proj, -0.05, 0.05)
self
def forward(self, input_0):
primals_1 = self.proj
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
TimO96/NLP2
|
StructuralProbe
| false
| 1,140
|
[
"MIT"
] | 0
|
83f65a385457f68397c641f38b53df0110282578
|
https://github.com/TimO96/NLP2/tree/83f65a385457f68397c641f38b53df0110282578
|
Pooling
|
import torch
import torch.nn as nn
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
tmp54 = tl.load(in_ptr0 + x3, xmask)
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x3), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x3), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x3), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x3), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x3, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -
1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (4 * (4 <= 2 + x0) + (2 + x0
) * (2 + x0 < 4)) * (4 * (4 <= 2 + x1) + (2 + x1) * (2 + x1 < 4)
) + -1 * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (4 * (4 <=
2 + x1) + (2 + x1) * (2 + x1 < 4)) + -1 * (0 * (0 >= -1 + x1) + (-1 +
x1) * (-1 + x1 > 0)) * (4 * (4 <= 2 + x0) + (2 + x0) * (2 + x0 < 4))
tmp53 = tmp51 / tmp52
tmp55 = tmp53 - tmp54
tl.store(in_out_ptr0 + x3, tmp55, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_avg_pool2d_sub_0[grid(256)](buf1, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class PoolingNew(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
TranNhiem/MA_SSRL_Pytorch
|
Pooling
| false
| 1,141
|
[
"MIT"
] | 0
|
87d946461850240fdd54de761603f13ef3710c2b
|
https://github.com/TranNhiem/MA_SSRL_Pytorch/tree/87d946461850240fdd54de761603f13ef3710c2b
|
TwoWordBilinearLabelProbe
|
import torch
import torch.nn as nn
import torch.utils.data.dataloader
class TwoWordBilinearLabelProbe(nn.Module):
""" Computes a bilinear function of pairs of vectors.
For a batch of sentences, computes all n^2 pairs of scores
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, prob, device):
super(TwoWordBilinearLabelProbe, self).__init__()
self.maximum_rank = rank
self.model_dim = model_dim
self.proj_L = nn.Parameter(data=torch.zeros(self.model_dim, self.
maximum_rank))
self.proj_R = nn.Parameter(data=torch.zeros(self.maximum_rank, self
.model_dim))
self.bias = nn.Parameter(data=torch.zeros(1))
nn.init.uniform_(self.proj_L, -0.05, 0.05)
nn.init.uniform_(self.proj_R, -0.05, 0.05)
nn.init.uniform_(self.bias, -0.05, 0.05)
self
self.dropout = nn.Dropout(p=prob)
def forward(self, batch):
""" Computes all n^2 pairs of attachment scores
for each sentence in a batch.
Computes h_i^TAh_j for all i,j
where A = LR, L in R^{model_dim x maximum_rank}; R in R^{maximum_rank x model_rank}
hence A is rank-constrained to maximum_rank.
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of scores of shape (batch_size, max_seq_len, max_seq_len)
"""
batchlen, seqlen, rank = batch.size()
batch = self.dropout(batch)
proj = torch.mm(self.proj_L, self.proj_R)
batch_square = batch.unsqueeze(2).expand(batchlen, seqlen, seqlen, rank
)
batch_transposed = batch.unsqueeze(1).expand(batchlen, seqlen,
seqlen, rank).contiguous().view(batchlen * seqlen * seqlen, rank, 1
)
psd_transformed = torch.matmul(batch_square.contiguous(), proj).view(
batchlen * seqlen * seqlen, 1, rank)
logits = (torch.bmm(psd_transformed, batch_transposed) + self.bias
).view(batchlen, seqlen, seqlen)
return logits
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4, 'rank': 4, 'prob': 0.5, 'device': 0}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_3, out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_view_0[grid(256)](primals_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, buf0, out=buf2)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_1, buf3, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf4 = empty_strided_cuda((64, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 4, 1),
0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4)
del buf2
buf5 = buf4
del buf4
triton_poi_fused_add_2[grid(64)](buf5, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0)
class TwoWordBilinearLabelProbeNew(nn.Module):
""" Computes a bilinear function of pairs of vectors.
For a batch of sentences, computes all n^2 pairs of scores
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, prob, device):
super(TwoWordBilinearLabelProbeNew, self).__init__()
self.maximum_rank = rank
self.model_dim = model_dim
self.proj_L = nn.Parameter(data=torch.zeros(self.model_dim, self.
maximum_rank))
self.proj_R = nn.Parameter(data=torch.zeros(self.maximum_rank, self
.model_dim))
self.bias = nn.Parameter(data=torch.zeros(1))
nn.init.uniform_(self.proj_L, -0.05, 0.05)
nn.init.uniform_(self.proj_R, -0.05, 0.05)
nn.init.uniform_(self.bias, -0.05, 0.05)
self
self.dropout = nn.Dropout(p=prob)
def forward(self, input_0):
primals_2 = self.proj_L
primals_3 = self.proj_R
primals_4 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
TimO96/NLP2
|
TwoWordBilinearLabelProbe
| false
| 1,142
|
[
"MIT"
] | 0
|
83f65a385457f68397c641f38b53df0110282578
|
https://github.com/TimO96/NLP2/tree/83f65a385457f68397c641f38b53df0110282578
|
Policy
|
import torch
from torch import nn
from torch.nn import functional as F
class Policy(nn.Module):
def __init__(self, act_dim, obs_dim):
super(Policy, self).__init__()
self.fc0 = nn.Linear(act_dim, 128)
self.fc1 = nn.Linear(128, obs_dim)
def forward(self, x):
x = x.type_as(self.fc0.bias)
x = F.relu(self.fc0(x))
x = F.dropout(x, training=self.training)
x = self.fc1(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'act_dim': 4, 'obs_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (128, 4), (4, 1))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), out=buf0)
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf3, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3
class PolicyNew(nn.Module):
def __init__(self, act_dim, obs_dim):
super(PolicyNew, self).__init__()
self.fc0 = nn.Linear(act_dim, 128)
self.fc1 = nn.Linear(128, obs_dim)
def forward(self, input_0):
primals_3 = self.fc0.weight
primals_2 = self.fc0.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
TommeyChang/CS294-Homework
|
Policy
| false
| 1,143
|
[
"MIT"
] | 0
|
17b525bf4366034b45c4febd89f1053d44550237
|
https://github.com/TommeyChang/CS294-Homework/tree/17b525bf4366034b45c4febd89f1053d44550237
|
CrossEntropyLoss
|
import torch
import torch.nn as nn
import torch.utils.data.dataloader
class CrossEntropyLoss(nn.Module):
"""Custom cross-entropy loss"""
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.pytorch_ce_loss = torch.nn.CrossEntropyLoss(ignore_index=-1,
reduction='sum')
def forward(self, predictions, label_batch, length_batch):
"""
Computes and returns CrossEntropyLoss.
Ignores all entries where label_batch=-1
Noralizes by the number of sentences in the batch.
Args:
predictions: A pytorch batch of logits
label_batch: A pytorch batch of label indices
length_batch: A pytorch batch of sentence lengths
Returns:
A tuple of:
cross_entropy_loss: average loss in the batch
total_sents: number of sentences in the batch
"""
batchlen, seqlen, class_count = predictions.size()
total_sents = torch.sum(length_batch != 0).float()
predictions = predictions.view(batchlen * seqlen, class_count)
label_batch = label_batch.view(batchlen * seqlen).long()
cross_entropy_loss = self.pytorch_ce_loss(predictions, label_batch
) / total_sents
return cross_entropy_loss, total_sents
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([16]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data.dataloader
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__to_copy_nll_loss_forward_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp12 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([1, 1], -1, tl.int64)
tmp3 = tmp1 != tmp2
tmp4 = tl.full([1, 1], 0, tl.int64)
tmp5 = tl.where(tmp3, tmp1, tmp4)
tmp6 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tmp9) & (tmp9 < 4),
'index out of bounds: 0 <= tmp9 < 4')
tmp11 = tl.load(in_ptr1 + (tmp9 + 4 * r0), None, eviction_policy=
'evict_last')
tmp13 = tl_math.exp(tmp12)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp11 - tmp23
tmp25 = -tmp24
tmp26 = 0.0
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)
@triton.jit
def triton_per_fused__to_copy_div_ne_sum_2(in_out_ptr0, in_ptr0, out_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_out_ptr0 + 0)
tmp9 = tl.broadcast_to(tmp8, [1])
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6.to(tl.float32)
tmp10 = tmp9 / tmp7
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__to_copy_nll_loss_forward_1[grid(1)](arg2_1, buf0,
buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg2_1
del buf0
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1
del buf1
triton_per_fused__to_copy_div_ne_sum_2[grid(1)](buf4, arg1_1, buf3,
1, 256, num_warps=2, num_stages=1)
del arg1_1
return buf4, buf3
class CrossEntropyLossNew(nn.Module):
"""Custom cross-entropy loss"""
def __init__(self):
super(CrossEntropyLossNew, self).__init__()
self.pytorch_ce_loss = torch.nn.CrossEntropyLoss(ignore_index=-1,
reduction='sum')
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg2_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
TimO96/NLP2
|
CrossEntropyLoss
| false
| 1,144
|
[
"MIT"
] | 0
|
83f65a385457f68397c641f38b53df0110282578
|
https://github.com/TimO96/NLP2/tree/83f65a385457f68397c641f38b53df0110282578
|
GlobalAttentionGeneral
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.onnx
def conv1x1(in_planes, out_planes, bias=False):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
class GlobalAttentionGeneral(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneral, self).__init__()
self.conv_context = conv1x1(cdf, idf)
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context):
"""
input: batch x idf x ih x iw (queryL=ihxiw)
context: batch x cdf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context.size(0), context.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context.unsqueeze(3)
sourceT = self.conv_context(sourceT).squeeze(3)
attn = torch.bmm(targetT, sourceT)
attn = attn.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
attn.data.masked_fill_(mask.data.bool(), -float('inf'))
attn = self.sm(attn)
attn = attn.view(batch_size, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
weightedContext = torch.bmm(sourceT, attn)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
attn = attn.view(batch_size, -1, ih, iw)
return weightedContext, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'idf': 4, 'cdf': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask)
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4,
4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=
(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1))
buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1,
buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4,
1), 0), out=buf2)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), buf4, out=buf5)
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1,
1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6
def conv1x1(in_planes, out_planes, bias=False):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
class GlobalAttentionGeneralNew(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneralNew, self).__init__()
self.conv_context = conv1x1(cdf, idf)
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1):
primals_3 = self.conv_context.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
Thesis-02F/Style-Attn
|
GlobalAttentionGeneral
| false
| 1,145
|
[
"MIT"
] | 0
|
55f78de4858e395ebf9750a23923fd772600290f
|
https://github.com/Thesis-02F/Style-Attn/tree/55f78de4858e395ebf9750a23923fd772600290f
|
Mlp
|
import math
import torch
import warnings
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_gelu_0[grid(256)](buf1, primals_2,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_1, primals_3, primals_4, buf1, buf2
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class MlpNew(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
TranNhiem/solo-learn
|
Mlp
| false
| 1,146
|
[
"MIT"
] | 0
|
7539732b68d153087d09a26a23e1edfdc49bc086
|
https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086
|
TokenMixer
|
import torch
import torch.nn as nn
class TokenMixer(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super(TokenMixer, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, x):
input = x
x = torch.transpose(x, 1, 2)
x = self.fc1(x)
x = self.activation(x)
if self.dropout is not None:
x = self.dropout(x)
x = self.fc2(x)
if self.dropout is not None:
x = self.dropout(x)
x = torch.transpose(x, 1, 2)
x = x + input
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x4, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_1[grid(256)](buf1, primals_3, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 4, 16, 1), 0)
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_5, primals_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf4, primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class TokenMixerNew(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super(TokenMixerNew, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
TheRealMarVin/mlp-mixer
|
TokenMixer
| false
| 1,147
|
[
"MIT"
] | 0
|
2124cb5c5adfc7af473cab535095471d4943adab
|
https://github.com/TheRealMarVin/mlp-mixer/tree/2124cb5c5adfc7af473cab535095471d4943adab
|
SelfAttention
|
import torch
import numpy as np
import torch.nn as nn
class SelfAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttention, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sqrt_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf1
)
del arg0_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class SelfAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttentionNew, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
TranQuocTrinh/image_captioning
|
SelfAttention
| false
| 1,148
|
[
"MIT"
] | 0
|
4c2d77426ba3b9fe9151a15a958320d5298aa190
|
https://github.com/TranQuocTrinh/image_captioning/tree/4c2d77426ba3b9fe9151a15a958320d5298aa190
|
AveragePoolingLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AveragePoolingLayer(nn.Module):
"""Implements the average pooling layer.
Basically, this layer can be used to downsample feature maps from spatial
domain.
"""
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, x):
ksize = [self.scale_factor, self.scale_factor]
strides = [self.scale_factor, self.scale_factor]
return F.avg_pool2d(x, kernel_size=ksize, stride=strides, padding=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AveragePoolingLayerNew(nn.Module):
"""Implements the average pooling layer.
Basically, this layer can be used to downsample feature maps from spatial
domain.
"""
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Twizwei/idinvert_pytorch
|
AveragePoolingLayer
| false
| 1,149
|
[
"MIT"
] | 0
|
11f1126aab517fbe32b488d92f6fdea339463d04
|
https://github.com/Twizwei/idinvert_pytorch/tree/11f1126aab517fbe32b488d92f6fdea339463d04
|
LayerNormChannel
|
import torch
import torch.nn as nn
class LayerNormChannel(nn.Module):
"""
LayerNorm only for Channel Dimension.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x):
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight.unsqueeze(-1).unsqueeze(-1) * x + self.bias.unsqueeze(
-1).unsqueeze(-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_2,
buf0, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormChannelNew(nn.Module):
"""
LayerNorm only for Channel Dimension.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
TranNhiem/MA_SSRL_Pytorch
|
LayerNormChannel
| false
| 1,150
|
[
"MIT"
] | 0
|
87d946461850240fdd54de761603f13ef3710c2b
|
https://github.com/TranNhiem/MA_SSRL_Pytorch/tree/87d946461850240fdd54de761603f13ef3710c2b
|
Whitening2d
|
import torch
import torch.nn as nn
from torch.cuda.amp import custom_fwd
from torch.nn.functional import conv2d
class Whitening2d(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
output_dim (int): number of dimension of projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2d, self).__init__()
self.output_dim = output_dim
self.eps = eps
@custom_fwd(cast_inputs=torch.float32)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Performs whitening using the Cholesky decomposition.
Args:
x (torch.Tensor): a batch or slice of projected features.
Returns:
torch.Tensor: a batch or slice of whitened features.
"""
x = x.unsqueeze(2).unsqueeze(3)
m = x.mean(0).view(self.output_dim, -1).mean(-1).view(1, -1, 1, 1)
xn = x - m
T = xn.permute(1, 0, 2, 3).contiguous().view(self.output_dim, -1)
f_cov = torch.mm(T, T.permute(1, 0)) / (T.shape[-1] - 1)
eye = torch.eye(self.output_dim).type(f_cov.type())
f_cov_shrinked = (1 - self.eps) * f_cov + self.eps * eye
inv_sqrt = torch.triangular_solve(eye, torch.cholesky(
f_cov_shrinked), upper=False)[0]
inv_sqrt = inv_sqrt.contiguous().view(self.output_dim, self.
output_dim, 1, 1)
decorrelated = conv2d(xn, inv_sqrt)
return decorrelated.squeeze(2).squeeze(2)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = 1.0
tmp11 = tmp9 / tmp10
tmp12 = tmp0 - tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_view_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__to_copy_add_div_eye_mul_2(in_out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = x1
tmp6 = x0
tmp7 = tmp5 == tmp6
tmp8 = 0.0
tmp9 = tl.where(tmp7, tmp3, tmp8)
tmp10 = tmp9 * tmp8
tmp11 = tmp4 + tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_eye_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_clone_view_1[grid(4, 4)](buf0, buf1, 4, 4, XBLOCK=
4, YBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
del buf1
buf3 = buf2
del buf2
triton_poi_fused__to_copy_add_div_eye_mul_2[grid(16)](buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = torch.ops.aten.cholesky.default(buf3)
buf5 = buf4
del buf4
buf6 = buf3
del buf3
triton_poi_fused__to_copy_eye_3[grid(16)](buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = torch.ops.aten.triangular_solve.default(buf6, buf5, False)
del buf5
buf8 = buf7[0]
del buf7
buf10 = buf6
del buf6
triton_poi_fused_clone_view_1[grid(4, 4)](buf8, buf10, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
del buf8
buf11 = extern_kernels.convolution(buf0, reinterpret_tensor(buf10,
(4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 1, 1), (4, 1, 1, 1))
del buf0
del buf10
return reinterpret_tensor(buf11, (4, 4), (4, 1), 0),
class Whitening2dNew(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
output_dim (int): number of dimension of projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2dNew, self).__init__()
self.output_dim = output_dim
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
TranNhiem/solo-learn
|
Whitening2d
| false
| 1,151
|
[
"MIT"
] | 0
|
7539732b68d153087d09a26a23e1edfdc49bc086
|
https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086
|
ClassificationModel
|
import torch
import torch.nn as nn
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80,
prior=0.01, feature_size=256, dropout1=0.25, dropout2=0.25):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout1)
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout2)
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.dropout1(out)
out = self.conv4(out)
out = self.act4(out)
out = self.dropout2(out)
out = self.output(out)
out = self.output_act(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.
num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_clone_convolution_5(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 46080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 720
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (720, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (720,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 9)](primals_1, buf0, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_4, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_6, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_8, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((720, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(184320, 9)](primals_10, buf5, 184320, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(16384)](buf7, primals_2,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf8 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_5,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_4[grid(16384)](buf11, primals_7,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(16384)](buf13, primals_9,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf14 = extern_kernels.convolution(buf13, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 720, 4, 4), (11520, 1, 2880, 720))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((4, 4, 4, 9, 80), (11520, 2880, 720, 80,
1), torch.float32)
triton_poi_fused_clone_convolution_5[grid(46080)](buf15, primals_11,
buf16, 46080, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
return reinterpret_tensor(buf16, (4, 144, 80), (11520, 80, 1), 0
), buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15
class ClassificationModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80,
prior=0.01, feature_size=256, dropout1=0.25, dropout2=0.25):
super(ClassificationModelNew, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout1)
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout2)
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
TobyChen0106/DeepQ_Final_B05901170
|
ClassificationModel
| false
| 1,152
|
[
"Apache-2.0"
] | 0
|
808a224c01272726a051eb7b7bb9e1b28887716e
|
https://github.com/TobyChen0106/DeepQ_Final_B05901170/tree/808a224c01272726a051eb7b7bb9e1b28887716e
|
ContrastiveLoss
|
import torch
from torch import nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
"""
contrastive loss
L2 distance:
L(a1,a2,y) = y * d(a1, a2) + (1-y)*max(0, m - d(a1, a2))
cosine distance:
L(a1, a2, y) = y * (1 - d(a1,a2)) + (1-y) * max(0, d(a1,a2) -m)
where y=1 if (a1,a2) relevant else 0
"""
def __init__(self, margin=1.0, metric='l2'):
super().__init__()
self.margin = margin
self.metric = metric
metric_list = ['l2', 'cosine']
assert metric in metric_list, 'Error! contrastive metric %s not supported.' % metric
self.metric_id = metric_list.index(metric)
def forward(self, x, y):
a, p = x.chunk(2, dim=0)
if self.metric_id == 0:
dist = torch.sum((a - p) ** 2, dim=1)
loss = y * dist + (1 - y) * F.relu(self.margin - dist)
else:
dist = F.cosine_similarity(a, p)
loss = y * (1 - dist) + (1 - y) * F.relu(dist - self.margin)
return loss.mean() / 2.0
def extra_repr(self) ->str:
return '?xD -> scalar (Loss)'
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (144 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (160 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (176 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_relu_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 32
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = tmp3 - tmp1
tmp6 = tl.full([1, 1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp4 * tmp7
tmp9 = tmp2 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(32)](arg0_1, buf0, 32, XBLOCK=
32, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_div_mean_mul_relu_rsub_1[grid(1)](buf2, arg1_1,
buf0, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
contrastive loss
L2 distance:
L(a1,a2,y) = y * d(a1, a2) + (1-y)*max(0, m - d(a1, a2))
cosine distance:
L(a1, a2, y) = y * (1 - d(a1,a2)) + (1-y) * max(0, d(a1,a2) -m)
where y=1 if (a1,a2) relevant else 0
"""
def __init__(self, margin=1.0, metric='l2'):
super().__init__()
self.margin = margin
self.metric = metric
metric_list = ['l2', 'cosine']
assert metric in metric_list, 'Error! contrastive metric %s not supported.' % metric
self.metric_id = metric_list.index(metric)
def extra_repr(self) ->str:
return '?xD -> scalar (Loss)'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
TuBui/deep_image_comparator
|
ContrastiveLoss
| false
| 1,153
|
[
"MIT"
] | 0
|
2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
GreedyHashLoss
|
import torch
class GreedyHashLoss(torch.nn.Module):
def __init__(self):
super(GreedyHashLoss, self).__init__()
def forward(self, u):
b = GreedyHashLoss.Hash.apply(u)
loss = (u.abs() - 1).pow(3).abs().mean()
return b, loss
class Hash(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_pow_sign_sub_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = tl_math.abs(tmp0)
tmp9 = 1.0
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp11 * tmp10
tmp13 = tl_math.abs(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_abs_mean_pow_sign_sub_0[grid(1)](buf2, arg0_1,
buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf0, buf2
class GreedyHashLossNew(torch.nn.Module):
def __init__(self):
super(GreedyHashLossNew, self).__init__()
class Hash(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
|
TuBui/deep_image_comparator
|
GreedyHashLoss
| false
| 1,154
|
[
"MIT"
] | 0
|
2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
MultiHeadAttention
|
import torch
import numpy as np
import torch.nn as nn
class SelfAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttention, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, embedding_dim, num_heads, dropout=0.1):
super(MultiHeadAttention, self).__init__()
self.embedding_dim = embedding_dim
self.self_attention = SelfAttention(dropout)
self.num_heads = num_heads
self.dim_per_head = embedding_dim // num_heads
self.query_projection = nn.Linear(embedding_dim, embedding_dim)
self.key_projection = nn.Linear(embedding_dim, embedding_dim)
self.value_projection = nn.Linear(embedding_dim, embedding_dim)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(embedding_dim, embedding_dim)
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
query = self.query_projection(query)
key = self.key_projection(key)
value = self.value_projection(value)
query = query.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
key = key.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
value = value.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
scores = self.self_attention(query, key, value, mask)
output = scores.transpose(1, 2).contiguous().view(batch_size, -1,
self.embedding_dim)
output = self.out(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embedding_dim': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused__softmax_1[grid(256)](buf5, buf8, 256, 16, XBLOCK=
32, num_warps=4, num_stages=1)
del buf5
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0),
out=buf10)
buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_11
return reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)
class SelfAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttention, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
class MultiHeadAttentionNew(nn.Module):
def __init__(self, embedding_dim, num_heads, dropout=0.1):
super(MultiHeadAttentionNew, self).__init__()
self.embedding_dim = embedding_dim
self.self_attention = SelfAttention(dropout)
self.num_heads = num_heads
self.dim_per_head = embedding_dim // num_heads
self.query_projection = nn.Linear(embedding_dim, embedding_dim)
self.key_projection = nn.Linear(embedding_dim, embedding_dim)
self.value_projection = nn.Linear(embedding_dim, embedding_dim)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(embedding_dim, embedding_dim)
def forward(self, input_0, input_1, input_2):
primals_2 = self.query_projection.weight
primals_3 = self.query_projection.bias
primals_4 = self.key_projection.weight
primals_5 = self.key_projection.bias
primals_7 = self.value_projection.weight
primals_8 = self.value_projection.bias
primals_10 = self.out.weight
primals_11 = self.out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
TranQuocTrinh/image_captioning
|
MultiHeadAttention
| false
| 1,155
|
[
"MIT"
] | 0
|
4c2d77426ba3b9fe9151a15a958320d5298aa190
|
https://github.com/TranQuocTrinh/image_captioning/tree/4c2d77426ba3b9fe9151a15a958320d5298aa190
|
LastBlock
|
import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlock(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.fc(x) * self.scale
x = x.view(x.shape[0], x.shape[1], 1, 1)
return self.bn(x).view(x.shape[0], x.shape[1])
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf1, primals_1
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlockNew(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Twizwei/idinvert_pytorch
|
LastBlock
| false
| 1,156
|
[
"MIT"
] | 0
|
11f1126aab517fbe32b488d92f6fdea339463d04
|
https://github.com/Twizwei/idinvert_pytorch/tree/11f1126aab517fbe32b488d92f6fdea339463d04
|
SineODE
|
import math
import torch
class SineODE(torch.nn.Module):
def forward(self, t, y):
return 2 * y / t + t ** 4 * torch.sin(2 * t) - t ** 2 + 4 * t ** 3
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sin_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 / tmp3
tmp5 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp3 * tmp1
tmp8 = tl_math.sin(tmp7)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp11 = tmp10 - tmp5
tmp12 = tmp5 * tmp3
tmp13 = 4.0
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tl.store(out_ptr0 + x0, tmp15, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sin_sub_0[grid(256)](arg0_1,
arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SineODENew(torch.nn.Module):
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
TylerChoi1224/torchdiffeq
|
SineODE
| false
| 1,157
|
[
"MIT"
] | 0
|
72f74d9651a58ab11cdadd60682f1b61e625ef53
|
https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53
|
ObjectClassifier
|
import torch
from torch import nn
class ObjectClassifier(nn.Module):
"""
perform log likelihood over sequence data ie. log(softmax), permute dimension
accordingly to meet NLLLoss requirement
Input: [seq_len, bsz, d_input]
Output: [bsz, num_classes, seq_len]
Usage:
bsz=5; seq=16; d_input=1024; num_classes=10
classiifer = ObjectClassifier(d_input, num_classes)
x = torch.rand(seq, bsz, d_input) # 16x5x1024
out = classifier(x) # 5x10x16
"""
def __init__(self, d_input, num_classes):
super(ObjectClassifier, self).__init__()
self.d_input = d_input
self.num_classes = num_classes
self.linear = nn.Linear(d_input, num_classes)
self.classifier = nn.LogSoftmax(dim=1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.linear.bias.data.zero_()
self.linear.weight.data.uniform_(-initrange, initrange)
def forward(self, x):
out = self.linear(x)
out = out.permute(1, 2, 0)
return self.classifier(out)
def extra_repr(self) ->str:
return 'SxBx%d -> Bx%dxS' % (self.d_input, self.num_classes)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_input': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + x0, tmp18, xmask)
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y1 + 4 * x2), xmask & ymask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr3 + (y1 + 4 * x2), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tl.store(out_ptr0 + (x2 + 4 * y3), tmp7, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4), (1, 16, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 4), (1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(16)](buf0, primals_2, buf1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(16, 4)](buf0, primals_2, buf1,
buf2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del primals_2
return buf3, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf3
class ObjectClassifierNew(nn.Module):
"""
perform log likelihood over sequence data ie. log(softmax), permute dimension
accordingly to meet NLLLoss requirement
Input: [seq_len, bsz, d_input]
Output: [bsz, num_classes, seq_len]
Usage:
bsz=5; seq=16; d_input=1024; num_classes=10
classiifer = ObjectClassifier(d_input, num_classes)
x = torch.rand(seq, bsz, d_input) # 16x5x1024
out = classifier(x) # 5x10x16
"""
def __init__(self, d_input, num_classes):
super(ObjectClassifierNew, self).__init__()
self.d_input = d_input
self.num_classes = num_classes
self.linear = nn.Linear(d_input, num_classes)
self.classifier = nn.LogSoftmax(dim=1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.linear.bias.data.zero_()
self.linear.weight.data.uniform_(-initrange, initrange)
def extra_repr(self) ->str:
return 'SxBx%d -> Bx%dxS' % (self.d_input, self.num_classes)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
TuBui/deep_image_comparator
|
ObjectClassifier
| false
| 1,158
|
[
"MIT"
] | 0
|
2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
ResidualBlock
|
import torch
import torch.nn as nn
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super(GenericLayer, self).__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
class ResidualBlock(nn.Module):
def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
super(ResidualBlock, self).__init__()
self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
def forward(self, x):
x = self._conv_1(x)
x = x + self._conv_2(x)
return x
def get_inputs():
return [torch.rand([4, 128, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'kernel_size': 4, 'stride': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 6
x3 = xindex // 6
y4 = yindex
x5 = xindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x2)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x3)) + 16 * y4),
xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 128 * x5 + 4608 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 128, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (128 * r1 + 2048 * (x0 // 128) + x0 % 128),
xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 128
x2 = xindex // 2048
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr3 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_reflection_pad2d_6(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 6
x2 = xindex // 768 % 6
x3 = xindex // 4608
x4 = xindex
tmp0 = tl.load(in_ptr0 + (1920 + x0 + -512 * tl_math.abs(-3 + tl_math.
abs(-1 + x2)) + -128 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) +
2048 * x3), None)
tl.store(out_ptr0 + x4, tmp0, None)
@triton.jit
def triton_poi_fused_add_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 16
y0 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 128 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 128 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp0 + tmp11
tl.store(out_ptr0 + (y0 + 16 * x2 + 2048 * y1), tmp12, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_2, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (128,), (1,))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128,), (1,))
assert_size_stride(primals_9, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16384, 9)](primals_2, buf0, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_0[grid(16384, 9)](primals_6, buf1, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused_reflection_pad2d_1[grid(512, 36)](primals_1, buf2,
512, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf3 = extern_kernels.convolution(buf2, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 4, 4), (2048, 1, 512, 128))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(8192)](buf4, primals_3, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_4, buf5, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf6 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_5, buf6, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch
.float32)
buf8 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch
.float32)
buf10 = buf8
del buf8
triton_per_fused__native_batch_norm_legit_4[grid(512)](buf10, buf4,
buf7, 512, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_relu_5[grid(8192)](buf4, buf7, buf10, buf5, buf6,
buf11, 8192, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused_reflection_pad2d_6[grid(18432)](buf11, buf12,
18432, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf12, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 4, 4), (2048, 1, 512, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_2[grid(8192)](buf14, primals_7, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_8, buf15, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_8
buf16 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_9, buf16, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf18 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf20 = buf18
del buf18
triton_per_fused__native_batch_norm_legit_4[grid(512)](buf20, buf14,
buf17, 512, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf21 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused_add_relu_7[grid(64, 128)](buf11, buf14, buf17,
buf20, buf15, buf16, buf21, 64, 128, XBLOCK=4, YBLOCK=64,
num_warps=4, num_stages=1)
del buf11
return (buf21, buf0, buf1, buf2, buf4, buf5, buf6, buf7, buf10, buf12,
buf14, buf15, buf16, buf17, buf20)
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super(GenericLayer, self).__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
class ResidualBlockNew(nn.Module):
def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
super(ResidualBlockNew, self).__init__()
self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
def forward(self, input_0):
primals_2 = self._conv_1._layer.weight
primals_3 = self._conv_1._layer.bias
primals_4 = self._conv_1._norm.weight
primals_5 = self._conv_1._norm.bias
primals_6 = self._conv_2._layer.weight
primals_7 = self._conv_2._layer.bias
primals_8 = self._conv_2._norm.weight
primals_9 = self._conv_2._norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
ThomasRanvier/cnn_style_transfer
|
ResidualBlock
| false
| 1,159
|
[
"MIT"
] | 0
|
90b6c76c20263c22f4e45184d572284726ecbd7b
|
https://github.com/ThomasRanvier/cnn_style_transfer/tree/90b6c76c20263c22f4e45184d572284726ecbd7b
|
FirstBlock
|
import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class FirstBlock(nn.Module):
"""Implements the first block, which is a convolutional block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3
) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
if activation_type == 'linear':
self.activate = nn.Identity()
elif activation_type == 'lrelu':
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
raise NotImplementedError(
f'Not implemented activation function: {activation_type}!')
def forward(self, x):
return self.activate(self.bn(self.conv(x) * self.scale))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0(in_out_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0[grid(256)](buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf2
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class FirstBlockNew(nn.Module):
"""Implements the first block, which is a convolutional block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3
) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
if activation_type == 'linear':
self.activate = nn.Identity()
elif activation_type == 'lrelu':
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
raise NotImplementedError(
f'Not implemented activation function: {activation_type}!')
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Twizwei/idinvert_pytorch
|
FirstBlock
| false
| 1,160
|
[
"MIT"
] | 0
|
11f1126aab517fbe32b488d92f6fdea339463d04
|
https://github.com/Twizwei/idinvert_pytorch/tree/11f1126aab517fbe32b488d92f6fdea339463d04
|
HR2O_NL
|
import torch
import torch.nn as nn
class HR2O_NL(nn.Module):
def __init__(self, hidden_dim=512, kernel_size=3, mlp_1x1=False):
super(HR2O_NL, self).__init__()
self.hidden_dim = hidden_dim
padding = kernel_size // 2
self.conv_q = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_k = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_v = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv = nn.Conv2d(hidden_dim, hidden_dim, 1 if mlp_1x1 else
kernel_size, padding=0 if mlp_1x1 else padding, bias=False)
self.norm = nn.GroupNorm(1, hidden_dim, affine=True)
self.dp = nn.Dropout(0.2)
def forward(self, x):
query = self.conv_q(x).unsqueeze(1)
key = self.conv_k(x).unsqueeze(0)
att = (query * key).sum(2) / self.hidden_dim ** 0.5
att = nn.Softmax(dim=1)(att)
value = self.conv_v(x)
virt_feats = (att.unsqueeze(2) * value).sum(1)
virt_feats = self.norm(virt_feats)
virt_feats = nn.functional.relu(virt_feats)
virt_feats = self.conv(virt_feats)
virt_feats = self.dp(virt_feats)
x = x + virt_feats
return x
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_red_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x2 = xindex // 16384
x4 = xindex % 16384
_tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x5 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r3 + 512 * x0 + 2097152 * x2), rmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + 512 * x4), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = _tmp4 + tmp3
_tmp4 = tl.where(rmask, tmp5, _tmp4)
tmp4 = tl.sum(_tmp4, 1)[:, None]
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp3 = tl.load(in_ptr0 + (x0 + 16384 * x2), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4096 + x0 + 16384 * x2), None,
eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8192 + x0 + 16384 * x2), None,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12288 + x0 + 16384 * x2), None,
eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.044194173824159216
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (x2 + 16384 * y1), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4096 + x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8192 + x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12288 + x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp8, ymask)
@triton.jit
def triton_poi_fused_mul_sum_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 512
x4 = xindex % 2097152
x5 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x3), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2097152 + x4), None, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (2 + 4 * x3), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4194304 + x4), None, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (3 + 4 * x3), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (6291456 + x4), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x5, tmp14, None)
@triton.jit
def triton_per_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex % 128
x1 = xindex // 128 % 128
x2 = xindex // 16384
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 512 * ((r3 + 128 * x1) % 4096) +
2097152 * x2 + (r3 + 128 * x1) // 4096), None, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
tl.store(out_ptr2 + x4, tmp7, None)
@triton.jit
def triton_per_fused_native_group_norm_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 128
x1 = xindex // 128
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 16384 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 128 * r2 + 16384 * x1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 128 * r2 + 16384 * x1), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + x3, tmp13, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused_native_group_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 128 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 128 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 2097152.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_9(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x2 = xindex // 2097152
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2097152.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(in_out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_10(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = yindex // 4096
tmp0 = tl.load(in_ptr0 + (x2 + 512 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 512 * y3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + 4096 * x2 + 2097152 * y1), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_2, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_3, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (512,), (1,))
assert_size_stride(primals_7, (512, 512, 3, 3), (4608, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(262144, 9)](primals_1, buf0, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
triton_poi_fused_1[grid(2048, 4096)](primals_2, buf1, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_0[grid(262144, 9)](primals_3, buf2, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_0[grid(262144, 9)](primals_4, buf3, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_0[grid(262144, 9)](primals_7, buf4, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_7
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf6 = extern_kernels.convolution(buf1, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf7 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_red_fused_mul_sum_2[grid(65536)](buf5, buf6, buf7, 65536,
512, XBLOCK=64, RBLOCK=64, num_warps=16, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused__softmax_3[grid(65536)](buf7, buf8, 65536, XBLOCK=
256, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4, 64, 64), (16384, 1, 256, 4), 0)
del buf7
triton_poi_fused__softmax_4[grid(16, 4096)](buf8, buf9, 16, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf1, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf11 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768,
512), torch.float32)
triton_poi_fused_mul_sum_5[grid(8388608)](buf9, buf10, buf11,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf8, (4, 1, 1, 1, 128, 128), (16384,
65536, 65536, 65536, 1, 128), 0)
del buf8
buf13 = empty_strided_cuda((4, 1, 1, 1, 128, 128), (16384, 65536,
65536, 65536, 1, 128), torch.float32)
buf14 = empty_strided_cuda((4, 1, 1, 1, 128, 128), (16384, 65536,
65536, 65536, 1, 128), torch.float32)
triton_per_fused_native_group_norm_6[grid(65536)](buf11, buf12,
buf13, buf14, 65536, 128, XBLOCK=32, num_warps=8, num_stages=1)
buf15 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512,
1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512,
1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512,
1), torch.float32)
triton_per_fused_native_group_norm_7[grid(512)](buf12, buf13, buf14,
buf15, buf16, buf17, 512, 128, XBLOCK=1, num_warps=2, num_stages=1)
del buf12
del buf13
del buf14
buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf19 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_native_group_norm_8[grid(4)](buf15, buf16, buf17,
buf18, buf19, buf21, 4, 128, XBLOCK=1, num_warps=2, num_stages=1)
del buf15
del buf16
del buf17
buf22 = buf11
del buf11
triton_poi_fused_native_group_norm_relu_9[grid(8388608)](buf22,
buf18, buf19, primals_5, primals_6, 8388608, XBLOCK=512,
num_warps=8, num_stages=1)
del buf19
del primals_6
buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf24 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_10[grid(16384, 512)](buf1, buf23, buf24, 16384,
512, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf23
return (buf24, buf0, buf1, buf2, buf3, primals_5, buf4, buf5, buf6,
buf9, buf10, reinterpret_tensor(buf18, (4, 1), (1, 1), 0),
reinterpret_tensor(buf21, (4, 1), (1, 1), 0), buf22)
class HR2O_NLNew(nn.Module):
def __init__(self, hidden_dim=512, kernel_size=3, mlp_1x1=False):
super(HR2O_NLNew, self).__init__()
self.hidden_dim = hidden_dim
padding = kernel_size // 2
self.conv_q = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_k = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_v = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv = nn.Conv2d(hidden_dim, hidden_dim, 1 if mlp_1x1 else
kernel_size, padding=0 if mlp_1x1 else padding, bias=False)
self.norm = nn.GroupNorm(1, hidden_dim, affine=True)
self.dp = nn.Dropout(0.2)
def forward(self, input_0):
primals_1 = self.conv_q.weight
primals_3 = self.conv_k.weight
primals_4 = self.conv_v.weight
primals_7 = self.conv.weight
primals_5 = self.norm.weight
primals_6 = self.norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
StephenStorm/ACAR
|
HR2O_NL
| false
| 1,161
|
[
"Apache-2.0"
] | 0
|
21ef3eca7330bd62eccb645018c8e48d9fc52153
|
https://github.com/StephenStorm/ACAR/tree/21ef3eca7330bd62eccb645018c8e48d9fc52153
|
Gated_Recurrent_Unit
|
import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Gated_Recurrent_Unit(nn.Module):
def __init__(self, fea_size, dropout):
super(Gated_Recurrent_Unit, self).__init__()
self.wih = nn.Linear(fea_size, fea_size, bias=True)
self.whh = nn.Linear(fea_size, fea_size, bias=True)
self.dropout = dropout
def forward(self, input, hidden):
output = self.wih(F.relu(input)) + self.whh(F.relu(hidden))
if self.dropout:
output = F.dropout(output, training=self.training)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fea_size': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_relu_0[grid(256)](primals_4, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf4, primals_3, buf3, primals_6,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_3
del primals_6
return buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
class Gated_Recurrent_UnitNew(nn.Module):
def __init__(self, fea_size, dropout):
super(Gated_Recurrent_UnitNew, self).__init__()
self.wih = nn.Linear(fea_size, fea_size, bias=True)
self.whh = nn.Linear(fea_size, fea_size, bias=True)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_2 = self.wih.weight
primals_3 = self.wih.bias
primals_5 = self.whh.weight
primals_6 = self.whh.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
SpartaG117/scene_graph_benchmark
|
Gated_Recurrent_Unit
| false
| 1,162
|
[
"MIT"
] | 0
|
e2e49940dd2f752b1faf9ae26707435ba3441bcb
|
https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb
|
ConstantODE
|
import torch
class ConstantODE(torch.nn.Module):
def __init__(self):
super(ConstantODE, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def forward(self, t, y):
return self.a + (y - (self.a * t + self.b)) ** 5
def y_exact(self, t):
return self.a * t + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp5 = tl.load(in_ptr3 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp7 = tmp4 + tmp6
tmp8 = tmp2 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 * tmp8
tmp12 = tmp1 + tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_0[grid(256)](primals_1, primals_4,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4
class ConstantODENew(torch.nn.Module):
def __init__(self):
super(ConstantODENew, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def y_exact(self, t):
return self.a * t + self.b
def forward(self, input_0, input_1):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
TylerChoi1224/torchdiffeq
|
ConstantODE
| false
| 1,163
|
[
"MIT"
] | 0
|
72f74d9651a58ab11cdadd60682f1b61e625ef53
|
https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53
|
GradientReversal
|
from torch.autograd import Function
import torch
class GradientReversalFunction(Function):
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_=1):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GradientReversalFunction(Function):
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class GradientReversalNew(torch.nn.Module):
def __init__(self, lambda_=1):
super(GradientReversalNew, self).__init__()
self.lambda_ = lambda_
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
TheElderMindseeker/pytorch-domain-adaptation
|
GradientReversal
| false
| 1,164
|
[
"MIT"
] | 0
|
70ca862708bd6e59b5eee5d7c8bd808ef3457dc8
|
https://github.com/TheElderMindseeker/pytorch-domain-adaptation/tree/70ca862708bd6e59b5eee5d7c8bd808ef3457dc8
|
Decoder
|
import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super(Decoder, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, z):
out = self.fc1(z)
out = self.relu(out)
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1 + 80 * (x1 % 4 // 4) + 320 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 20), (20, 1))
assert_size_stride(primals_5, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1,
primals_2, buf4, 1280, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
triton_poi_fused_view_1[grid(1280)](buf1, buf2, 1280, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(20, 2), (1, 20), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class DecoderNew(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super(DecoderNew, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
TylerChoi1224/torchdiffeq
|
Decoder
| false
| 1,165
|
[
"MIT"
] | 0
|
72f74d9651a58ab11cdadd60682f1b61e625ef53
|
https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53
|
WeighedMSELoss
|
import torch
from torch import Tensor
from torch.nn import MSELoss
class WeighedMSELoss(MSELoss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor:
loss = super().forward(input, target)
return (loss * self.weights).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weights': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import MSELoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class WeighedMSELossNew(MSELoss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
UT-ADL/lidar-as-camera
|
WeighedMSELoss
| false
| 1,166
|
[
"Apache-2.0"
] | 0
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
https://github.com/UT-ADL/lidar-as-camera/tree/daccb2ae21b4899ecfd8611b7a27f91681617383
|
LanguageModelCriterion
|
import torch
import torch.nn as nn
from torch.autograd import *
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)].float()
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg0_1, arg1_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class LanguageModelCriterionNew(nn.Module):
def __init__(self):
super(LanguageModelCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg1_1 = input_0
arg0_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
VISLANG-Lab/MGCL
|
LanguageModelCriterion
| false
| 1,167
|
[
"MIT"
] | 0
|
22da06ffa7410d9632bfda8eefb1b79e4f660de0
|
https://github.com/VISLANG-Lab/MGCL/tree/22da06ffa7410d9632bfda8eefb1b79e4f660de0
|
PoolFormerBlock
|
import math
import torch
import warnings
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlock(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
--drop path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, x):
if self.use_layer_scale:
x = x + self.drop_path(self.layer_scale_1.unsqueeze(-1).
unsqueeze(-1) * self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.layer_scale_2.unsqueeze(-1).
unsqueeze(-1) * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + 64 * x0), tmp27, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_avg_pool2d_native_group_norm_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 4 % 4
r1 = rindex % 4
r6 = rindex
x0 = xindex
r3 = rindex // 16
tmp54 = tl.load(in_ptr1 + (r6 + 64 * x0), xmask, other=0.0)
tmp55 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (r6 + 64 * x0), xmask, other=0.0)
tmp83 = tl.load(in_ptr3 + r3, None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr4 + r3, None, eviction_policy='evict_last')
tmp0 = -1 + r2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + r1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + r6 + 64 * x0), tmp10 & xmask, other=0.0)
tmp12 = r1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + r6 + 64 * x0), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + r1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + r6 + 64 * x0), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = r2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + r6 + 64 * x0), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (r6 + 64 * x0), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + r6 + 64 * x0), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + r2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + r6 + 64 * x0), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + r6 + 64 * x0), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + r6 + 64 * x0), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (0 * (0 >= -
1 + r2) + (-1 + r2) * (-1 + r2 > 0)) + (4 * (4 <= 2 + r1) + (2 + r1
) * (2 + r1 < 4)) * (4 * (4 <= 2 + r2) + (2 + r2) * (2 + r2 < 4)
) + -1 * (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (4 * (4 <=
2 + r2) + (2 + r2) * (2 + r2 < 4)) + -1 * (0 * (0 >= -1 + r2) + (-1 +
r2) * (-1 + r2 > 0)) * (4 * (4 <= 2 + r1) + (2 + r1) * (2 + r1 < 4))
tmp53 = tmp51 / tmp52
tmp57 = tmp53 - tmp56
tmp58 = tmp55 * tmp57
tmp59 = tmp54 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tl.where(xmask, tmp60, 0)
tmp63 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp65 = tl.where(xmask, tmp63, 0)
tmp66 = tl.sum(tmp65, 1)[:, None]
tmp67 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp66 / tmp68
tmp70 = tmp60 - tmp69
tmp71 = tmp70 * tmp70
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.where(xmask, tmp72, 0)
tmp75 = tl.sum(tmp74, 1)[:, None]
tmp76 = tmp59 - tmp69
tmp77 = 64.0
tmp78 = tmp75 / tmp77
tmp79 = 1e-05
tmp80 = tmp78 + tmp79
tmp81 = libdevice.rsqrt(tmp80)
tmp82 = tmp76 * tmp81
tmp84 = tmp82 * tmp83
tmp86 = tmp84 + tmp85
tl.store(out_ptr0 + (r6 + 64 * x0), tmp53, xmask)
tl.store(out_ptr3 + (r6 + 64 * x0), tmp86, xmask)
tl.store(out_ptr4 + x0, tmp81, xmask)
tl.store(out_ptr1 + x0, tmp69, xmask)
@triton.jit
def triton_poi_fused_convolution_gelu_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, xmask)
tmp6 = tl.load(in_ptr4 + x3, xmask)
tmp10 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp7 = tmp5 - tmp6
tmp8 = tmp4 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(4)](primals_4, primals_2,
primals_3, buf0, buf3, buf16, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_avg_pool2d_native_group_norm_1[grid(4)](buf3,
primals_4, primals_1, primals_6, primals_7, buf4, buf5, buf8,
buf9, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 4, 4), (256, 16, 4, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_gelu_2[grid(1024)](buf11, primals_9,
buf12, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 16, 4, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_sub_3[grid(256)](buf14,
primals_11, primals_4, primals_1, buf4, buf3, primals_5, buf15,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return (buf15, primals_1, primals_4, primals_5, primals_6, primals_8,
primals_10, buf3, buf4, buf8, reinterpret_tensor(buf5, (4, 1), (1,
1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11, buf12,
buf14, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0),
reinterpret_tensor(buf16, (4, 1, 1), (1, 1, 1), 0))
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlockNew(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
--drop path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, input_0):
primals_1 = self.layer_scale_1
primals_2 = self.layer_scale_2
primals_3 = self.norm1.weight
primals_5 = self.norm1.bias
primals_6 = self.norm2.weight
primals_7 = self.norm2.bias
primals_8 = self.mlp.fc1.weight
primals_9 = self.mlp.fc1.bias
primals_10 = self.mlp.fc2.weight
primals_11 = self.mlp.fc2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
TranNhiem/solo-learn
|
PoolFormerBlock
| false
| 1,168
|
[
"MIT"
] | 0
|
7539732b68d153087d09a26a23e1edfdc49bc086
|
https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086
|
SelfAttn
|
import torch
from torch import nn
import torch.nn.functional as F
class SelfAttn(nn.Module):
"""
Self attention layer: aggreagating a sequence into a single vector.
This implementation uses the attention formula proposed by Sukhbaatar etal. 2015
https://papers.nips.cc/paper/5846-end-to-end-memory-networks.pdf
Usage:
seq_len=10; bsz=16; in_dim=128
attn = SelfAtnn(in_dim)
x = torch.rand(seq_len, bsz, in_dim) # 10x16x128
y, a = attn(x) # output y 16x128, attention weight a 10x16
"""
def __init__(self, d_input, units=None):
"""
:param d_input: input feature dimension
:param units: dimension of internal projection, if None it will be set to d_input
"""
super(SelfAttn, self).__init__()
self.d_input = d_input
self.units = units if units else d_input
self.projection = nn.Linear(self.d_input, self.units)
self.V = nn.Parameter(torch.Tensor(self.units, 1))
self.init_weights()
def init_weights(self):
initrange = 0.1
self.projection.bias.data.zero_()
self.projection.weight.data.uniform_(-initrange, initrange)
self.V.data.uniform_(-initrange, initrange)
def forward(self, x, mask=None):
"""
ui = tanh(xW+b)
a = softmax(uV)
o = sum(a*x)
:param x: input tensor [seq_len, bsz, feat_dim]
:return: output tensor [bsz, feat_dim]
"""
ui = torch.tanh(self.projection(x))
ai = F.softmax(torch.matmul(ui, self.V), dim=0)
if mask is not None:
ai = ai * mask.unsqueeze(-1)
ai = ai / ai.sum(dim=0, keepdim=True)
o = torch.sum(x * ai, dim=0)
return o, ai.squeeze(-1)
def extra_repr(self):
return 'Sx?x%d -> ?x%d' % (self.d_input, self.d_input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_input': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
triton_poi_fused_mul_sum_3[grid(64)](primals_3, buf4, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf5, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0
), primals_3, buf1, buf4, reinterpret_tensor(primals_4, (1, 4), (1,
1), 0)
class SelfAttnNew(nn.Module):
"""
Self attention layer: aggreagating a sequence into a single vector.
This implementation uses the attention formula proposed by Sukhbaatar etal. 2015
https://papers.nips.cc/paper/5846-end-to-end-memory-networks.pdf
Usage:
seq_len=10; bsz=16; in_dim=128
attn = SelfAtnn(in_dim)
x = torch.rand(seq_len, bsz, in_dim) # 10x16x128
y, a = attn(x) # output y 16x128, attention weight a 10x16
"""
def __init__(self, d_input, units=None):
"""
:param d_input: input feature dimension
:param units: dimension of internal projection, if None it will be set to d_input
"""
super(SelfAttnNew, self).__init__()
self.d_input = d_input
self.units = units if units else d_input
self.projection = nn.Linear(self.d_input, self.units)
self.V = nn.Parameter(torch.Tensor(self.units, 1))
self.init_weights()
def init_weights(self):
initrange = 0.1
self.projection.bias.data.zero_()
self.projection.weight.data.uniform_(-initrange, initrange)
self.V.data.uniform_(-initrange, initrange)
def extra_repr(self):
return 'Sx?x%d -> ?x%d' % (self.d_input, self.d_input)
def forward(self, input_0):
primals_4 = self.V
primals_1 = self.projection.weight
primals_2 = self.projection.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
TuBui/deep_image_comparator
|
SelfAttn
| false
| 1,169
|
[
"MIT"
] | 0
|
2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44
|
SinkhornKnopp
|
import torch
import torch.distributed as dist
class SinkhornKnopp(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
A simple iterative method to approach the double stochastic matrix is to alternately rescale
rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
@torch.no_grad()
def forward(self, Q: 'torch.Tensor') ->torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
Applies the entropy regularization, normalizes the Q matrix and then normalizes rows and
columns in an alternating fashion for num_iter times. Before returning it normalizes again
the columns in order for the output to be an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0]
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.num_iters):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + 4 * y0), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_1[grid(4)](arg0_1, buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(4)](arg0_1, buf0, buf1, buf2, 4, XBLOCK
=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_3[grid(4)](arg0_1, buf0, buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_4[grid(16)](arg0_1, buf0, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_div_6[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused_mul_7[grid(4, 4)](buf6, buf7, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused_8[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4, YBLOCK=4,
num_warps=1, num_stages=1)
del buf7
return buf8,
class SinkhornKnoppNew(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
A simple iterative method to approach the double stochastic matrix is to alternately rescale
rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
TranNhiem/solo-learn
|
SinkhornKnopp
| false
| 1,170
|
[
"MIT"
] | 0
|
7539732b68d153087d09a26a23e1edfdc49bc086
|
https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086
|
RewardCriterion
|
import torch
import torch.nn as nn
from torch.autograd import *
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
input = input.reshape(-1)
reward = reward.reshape(-1)
mask = (seq > 0).float()
mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1
).reshape(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = r0 % 4
tmp12 = tl.full([1, 1], 0, tl.int64)
tmp14 = tl.full([1, 1], 1, tl.int64)
tmp15 = tmp11 < tmp14
tmp16 = 1.0
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tmp11 >= tmp14
tl.full([1, 1], 4, tl.int64)
tmp22 = tl.load(in_ptr0 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4),
[XBLOCK, RBLOCK]), tmp19, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 > tmp12
tmp24 = tmp23.to(tl.float32)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp15, tmp18, tmp26)
tmp28 = tmp10 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp35 = tmp31 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class RewardCriterionNew(nn.Module):
def __init__(self):
super(RewardCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
VISLANG-Lab/MGCL
|
RewardCriterion
| false
| 1,171
|
[
"MIT"
] | 0
|
22da06ffa7410d9632bfda8eefb1b79e4f660de0
|
https://github.com/VISLANG-Lab/MGCL/tree/22da06ffa7410d9632bfda8eefb1b79e4f660de0
|
ResBlock
|
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, x):
shortcut = x
out = self.relu(self.norm1(x))
if self.downsample is not None:
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out + shortcut
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(16)](primals_1,
primals_2, primals_3, buf0, buf3, buf12, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_native_group_norm_relu_0[grid(16)](buf4, primals_5,
primals_6, buf5, buf9, buf8, 16, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_6
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_add_1[grid(256)](buf11, primals_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4,
reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(
buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1),
(4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0))
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlockNew, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, input_0):
primals_2 = self.norm1.weight
primals_3 = self.norm1.bias
primals_4 = self.conv1.weight
primals_5 = self.norm2.weight
primals_6 = self.norm2.bias
primals_7 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
TylerChoi1224/torchdiffeq
|
ResBlock
| false
| 1,172
|
[
"MIT"
] | 0
|
72f74d9651a58ab11cdadd60682f1b61e625ef53
|
https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53
|
MultiheadAttention
|
import torch
import torch.nn as nn
class MultiheadAttention(nn.Module):
"""A warpper for torch.nn.MultiheadAttention.
This module implements MultiheadAttention with residual connection,
and positional encoding used in DETR is also passed as input.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
"""
def __init__(self, embed_dims, num_heads, dropout=0.0):
super(MultiheadAttention, self).__init__()
assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, x, key=None, value=None, residual=None, query_pos=
None, key_pos=None, attn_mask=None, key_padding_mask=None):
"""Forward function for `MultiheadAttention`.
Args:
x (Tensor): The input query with shape [num_query, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
key (Tensor): The key tensor with shape [num_key, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
Default None. If None, the `query` will be used.
value (Tensor): The value tensor with same shape as `key`.
Same in `nn.MultiheadAttention.forward`. Default None.
If None, the `key` will be used.
residual (Tensor): The tensor used for addition, with the
same shape as `x`. Default None. If None, `x` will be used.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. Default None. If not None, it will
be added to `x` before forward function.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. Default None. If not None, it will
be added to `key` before forward function. If None, and
`query_pos` has the same shape as `key`, then `query_pos`
will be used for `key_pos`.
attn_mask (Tensor): ByteTensor mask with shape [num_query,
num_key]. Same in `nn.MultiheadAttention.forward`.
Default None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
Same in `nn.MultiheadAttention.forward`. Default None.
Returns:
Tensor: forwarded results with shape [num_query, bs, embed_dims].
"""
query = x
if key is None:
key = query
if value is None:
value = key
if residual is None:
residual = x
if key_pos is None:
if query_pos is not None and key is not None:
if query_pos.shape == key.shape:
key_pos = query_pos
if query_pos is not None:
query = query + query_pos
if key_pos is not None:
key = key + key_pos
out = self.attn(query, key, value=value, attn_mask=attn_mask,
key_padding_mask=key_padding_mask)[0]
return residual + self.dropout(out)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'dropout={self.dropout})'
return repr_str
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dims': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_add_4[grid(16)](buf10, primals_1, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
return buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""A warpper for torch.nn.MultiheadAttention.
This module implements MultiheadAttention with residual connection,
and positional encoding used in DETR is also passed as input.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
"""
def __init__(self, embed_dims, num_heads, dropout=0.0):
super(MultiheadAttentionNew, self).__init__()
assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
self.dropout = nn.Dropout(dropout)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'dropout={self.dropout})'
return repr_str
def forward(self, input_0):
primals_2 = self.attn.in_proj_weight
primals_3 = self.attn.in_proj_bias
primals_1 = self.attn.out_proj.weight
primals_5 = self.attn.out_proj.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
VIRC-lab-csust/AGMNet
|
MultiheadAttention
| false
| 1,173
|
[
"Apache-2.0"
] | 0
|
ead95466da343cf9436774138c642d2ca12da4e4
|
https://github.com/VIRC-lab-csust/AGMNet/tree/ead95466da343cf9436774138c642d2ca12da4e4
|
Model
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, action_dim, conv=False, conv_size=16,
fc_size=32, K=2):
super(Model, self).__init__()
self.input_size = input_size
self.input_h = int(np.sqrt(input_size))
self.action_dim = action_dim
self.K = K
self.conv = conv
self.conv_size = conv_size
self.fc_size = fc_size
if self.conv:
self.conv1 = nn.Conv2d(1, self.conv_size, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.fc = nn.Linear(2 * 2 * self.conv_size + self.action_dim,
self.fc_size)
else:
self.fc = nn.Linear(self.input_size + self.action_dim, self.fc_size
)
self.rew_out = nn.Linear(self.fc_size, 1)
self.pred_out = nn.Linear(self.fc_size, self.input_size)
def forward(self, x, a):
if self.conv:
out = x.unsqueeze(1)
out = F.relu(self.conv1(out))
out = F.relu(self.conv2(out))
out = F.relu(self.conv3(out))
out = F.relu(self.conv4(out))
out = out.view(out.size(0), -1)
out = torch.cat((out, a), dim=-1)
else:
out = torch.cat((x, a), dim=-1)
out = F.relu(self.fc(out))
return self.pred_out(out).reshape(out.size(0), self.input_h, self.
input_h), torch.sigmoid(self.rew_out(out))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'action_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (32, 8), (8, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (4, 32), (32, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 32), (32, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(128)](buf2, primals_4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(32, 4), (1, 32), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_7, (32, 1), (1,
32), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_sigmoid_2[grid(4)](buf5, primals_8, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_8
return reinterpret_tensor(buf3, (4, 2, 2), (4, 2, 1), 0
), buf5, buf0, buf2, buf5, primals_7, primals_5
class ModelNew(nn.Module):
def __init__(self, input_size, action_dim, conv=False, conv_size=16,
fc_size=32, K=2):
super(ModelNew, self).__init__()
self.input_size = input_size
self.input_h = int(np.sqrt(input_size))
self.action_dim = action_dim
self.K = K
self.conv = conv
self.conv_size = conv_size
self.fc_size = fc_size
if self.conv:
self.conv1 = nn.Conv2d(1, self.conv_size, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.fc = nn.Linear(2 * 2 * self.conv_size + self.action_dim,
self.fc_size)
else:
self.fc = nn.Linear(self.input_size + self.action_dim, self.fc_size
)
self.rew_out = nn.Linear(self.fc_size, 1)
self.pred_out = nn.Linear(self.fc_size, self.input_size)
def forward(self, input_0, input_1):
primals_3 = self.fc.weight
primals_4 = self.fc.bias
primals_7 = self.rew_out.weight
primals_8 = self.rew_out.bias
primals_5 = self.pred_out.weight
primals_6 = self.pred_out.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
|
VashishtMadhavan/pytorch-maml-rl
|
Model
| false
| 1,174
|
[
"MIT"
] | 0
|
d8821b8374d973869bb6a1393f1b2c369c9a664b
|
https://github.com/VashishtMadhavan/pytorch-maml-rl/tree/d8821b8374d973869bb6a1393f1b2c369c9a664b
|
SoftClDiceLoss
|
import torch
import numpy as np
from torch import nn
import torch.jit
import torch.nn.functional as F
import torch.nn.functional
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z)))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z)))
:param net_output:
:param gt:
:param axes: can be (, ) = no summation
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == 'cuda':
y_onehot = y_onehot
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
tn = (1 - net_output) * (1 - y_onehot)
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp,
dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp,
dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn,
dim=1)), dim=1)
tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn,
dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tn = tn ** 2
if len(axes) > 0:
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
tn = sum_tensor(tn, axes, keepdim=False)
return tp, fp, fn, tn
def soft_erode(I):
p1 = -F.max_pool3d(-I, (3, 1, 1), (1, 1, 1), (1, 0, 0))
p2 = -F.max_pool3d(-I, (1, 3, 1), (1, 1, 1), (0, 1, 0))
p3 = -F.max_pool3d(-I, (1, 1, 3), (1, 1, 1), (0, 0, 1))
return torch.min(torch.min(p1, p3), p2)
def soft_dilate(I):
return F.max_pool3d(I, (3, 3, 3), (1, 1, 1), (1, 1, 1))
def soft_open(I):
return soft_dilate(soft_erode(I))
def soft_skel(img, k=50):
img1 = soft_open(img)
skel = F.relu(img - img1)
for iter in range(k):
img = soft_erode(img)
img1 = soft_open(img)
delta = F.relu(img - img1)
skel = skel + F.relu(delta - skel * delta)
if torch.cuda.is_available():
del img1
del delta
return skel
class SoftClDiceLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, k=2):
"""
"""
super(SoftClDiceLoss, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.k = k
def softCenterline(self, I):
max = nn.MaxPool3d(3, stride=1, padding=1)
relu = nn.ReLU()
Ip = max(-max(-I))
cl = relu(I - Ip)
for iter in range(self.k):
I = -max(-I)
Ip = max(-max(-I))
cl = cl + cl * relu(I - Ip)
return cl
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
clp = soft_skel(x)
cll = soft_skel(y)
tp, _fp, fn, _tn = get_tp_fp_fn_tn(x, cll, axes, loss_mask, False)
tpc, fpc, _fnc, _tnc = get_tp_fp_fn_tn(clp, y, axes, loss_mask, False)
clp2vollnom = tpc + self.smooth
clp2vollden = tpc + fpc + self.smooth
clp2voll = clp2vollnom / clp2vollden
cll2volpnom = tp + self.smooth
cll2volpden = tp + fn + self.smooth
cll2volp = cll2volpnom / cll2volpden
dc = 2 * clp2voll * cll2volp / (cll2volp + clp2voll + 1e-08)
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return 1 - dc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn
import torch.jit
import torch.nn.functional as F
import torch.nn.functional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_neg_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp1, xmask)
tl.store(out_ptr2 + x0, tmp1, xmask)
tl.store(out_ptr3 + x0, tmp1, xmask)
tl.store(out_ptr4 + x0, tmp1, xmask)
tl.store(out_ptr5 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_minimum_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_out_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_minimum_neg_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tmp8 = -tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
tl.store(out_ptr2 + x0, tmp8, xmask)
tl.store(out_ptr3 + x0, tmp8, xmask)
tl.store(out_ptr4 + x0, tmp8, xmask)
tl.store(out_ptr5 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_minimum_neg_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_minimum_neg_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tmp8 = -tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
tl.store(out_ptr2 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_minimum_mul_neg_relu_sub_5(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5,
in_out_ptr6, in_out_ptr7, in_out_ptr8, in_out_ptr9, in_out_ptr10,
in_out_ptr11, in_out_ptr12, in_out_ptr13, in_out_ptr14, in_out_ptr15,
in_out_ptr16, in_out_ptr17, in_out_ptr18, in_out_ptr19, in_out_ptr20,
in_out_ptr21, in_out_ptr22, in_out_ptr23, in_out_ptr24, in_out_ptr25,
in_out_ptr26, in_out_ptr27, in_out_ptr28, in_out_ptr29, in_out_ptr30,
in_out_ptr31, in_out_ptr32, in_out_ptr33, in_out_ptr34, in_out_ptr35,
in_out_ptr36, in_out_ptr37, in_out_ptr38, in_out_ptr39, in_out_ptr40,
in_out_ptr41, in_out_ptr42, in_out_ptr43, in_out_ptr44, in_out_ptr45,
in_out_ptr46, in_out_ptr47, in_out_ptr48, in_out_ptr49, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15,
in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22,
in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29,
in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36,
in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43,
in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50,
in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57,
in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, in_ptr64,
in_ptr65, in_ptr66, in_ptr67, in_ptr68, in_ptr69, in_ptr70, in_ptr71,
in_ptr72, in_ptr73, in_ptr74, in_ptr75, in_ptr76, in_ptr77, in_ptr78,
in_ptr79, in_ptr80, in_ptr81, in_ptr82, in_ptr83, in_ptr84, in_ptr85,
in_ptr86, in_ptr87, in_ptr88, in_ptr89, in_ptr90, in_ptr91, in_ptr92,
in_ptr93, in_ptr94, in_ptr95, in_ptr96, in_ptr97, in_ptr98, in_ptr99,
in_ptr100, in_ptr101, in_ptr102, in_ptr103, in_ptr104, in_ptr105,
in_ptr106, in_ptr107, in_ptr108, in_ptr109, in_ptr110, in_ptr111,
in_ptr112, in_ptr113, in_ptr114, in_ptr115, in_ptr116, in_ptr117,
in_ptr118, in_ptr119, in_ptr120, in_ptr121, in_ptr122, in_ptr123,
in_ptr124, in_ptr125, in_ptr126, in_ptr127, in_ptr128, in_ptr129,
in_ptr130, in_ptr131, in_ptr132, in_ptr133, in_ptr134, in_ptr135,
in_ptr136, in_ptr137, in_ptr138, in_ptr139, in_ptr140, in_ptr141,
in_ptr142, in_ptr143, in_ptr144, in_ptr145, in_ptr146, in_ptr147,
in_ptr148, in_ptr149, in_ptr150, in_ptr151, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp10 = tl.load(in_ptr3 + x0, xmask)
tmp13 = tl.load(in_ptr4 + x0, xmask)
tmp20 = tl.load(in_out_ptr1 + x0, xmask)
tmp22 = tl.load(in_ptr5 + x0, xmask)
tmp25 = tl.load(in_ptr6 + x0, xmask)
tmp28 = tl.load(in_ptr7 + x0, xmask)
tmp35 = tl.load(in_out_ptr2 + x0, xmask)
tmp37 = tl.load(in_ptr8 + x0, xmask)
tmp40 = tl.load(in_ptr9 + x0, xmask)
tmp43 = tl.load(in_ptr10 + x0, xmask)
tmp50 = tl.load(in_out_ptr3 + x0, xmask)
tmp52 = tl.load(in_ptr11 + x0, xmask)
tmp55 = tl.load(in_ptr12 + x0, xmask)
tmp58 = tl.load(in_ptr13 + x0, xmask)
tmp65 = tl.load(in_out_ptr4 + x0, xmask)
tmp67 = tl.load(in_ptr14 + x0, xmask)
tmp70 = tl.load(in_ptr15 + x0, xmask)
tmp73 = tl.load(in_ptr16 + x0, xmask)
tmp80 = tl.load(in_out_ptr5 + x0, xmask)
tmp82 = tl.load(in_ptr17 + x0, xmask)
tmp85 = tl.load(in_ptr18 + x0, xmask)
tmp88 = tl.load(in_ptr19 + x0, xmask)
tmp95 = tl.load(in_out_ptr6 + x0, xmask)
tmp97 = tl.load(in_ptr20 + x0, xmask)
tmp100 = tl.load(in_ptr21 + x0, xmask)
tmp103 = tl.load(in_ptr22 + x0, xmask)
tmp110 = tl.load(in_out_ptr7 + x0, xmask)
tmp112 = tl.load(in_ptr23 + x0, xmask)
tmp115 = tl.load(in_ptr24 + x0, xmask)
tmp118 = tl.load(in_ptr25 + x0, xmask)
tmp125 = tl.load(in_out_ptr8 + x0, xmask)
tmp127 = tl.load(in_ptr26 + x0, xmask)
tmp130 = tl.load(in_ptr27 + x0, xmask)
tmp133 = tl.load(in_ptr28 + x0, xmask)
tmp140 = tl.load(in_out_ptr9 + x0, xmask)
tmp142 = tl.load(in_ptr29 + x0, xmask)
tmp145 = tl.load(in_ptr30 + x0, xmask)
tmp148 = tl.load(in_ptr31 + x0, xmask)
tmp155 = tl.load(in_out_ptr10 + x0, xmask)
tmp157 = tl.load(in_ptr32 + x0, xmask)
tmp160 = tl.load(in_ptr33 + x0, xmask)
tmp163 = tl.load(in_ptr34 + x0, xmask)
tmp170 = tl.load(in_out_ptr11 + x0, xmask)
tmp172 = tl.load(in_ptr35 + x0, xmask)
tmp175 = tl.load(in_ptr36 + x0, xmask)
tmp178 = tl.load(in_ptr37 + x0, xmask)
tmp185 = tl.load(in_out_ptr12 + x0, xmask)
tmp187 = tl.load(in_ptr38 + x0, xmask)
tmp190 = tl.load(in_ptr39 + x0, xmask)
tmp193 = tl.load(in_ptr40 + x0, xmask)
tmp200 = tl.load(in_out_ptr13 + x0, xmask)
tmp202 = tl.load(in_ptr41 + x0, xmask)
tmp205 = tl.load(in_ptr42 + x0, xmask)
tmp208 = tl.load(in_ptr43 + x0, xmask)
tmp215 = tl.load(in_out_ptr14 + x0, xmask)
tmp217 = tl.load(in_ptr44 + x0, xmask)
tmp220 = tl.load(in_ptr45 + x0, xmask)
tmp223 = tl.load(in_ptr46 + x0, xmask)
tmp230 = tl.load(in_out_ptr15 + x0, xmask)
tmp232 = tl.load(in_ptr47 + x0, xmask)
tmp235 = tl.load(in_ptr48 + x0, xmask)
tmp238 = tl.load(in_ptr49 + x0, xmask)
tmp245 = tl.load(in_out_ptr16 + x0, xmask)
tmp247 = tl.load(in_ptr50 + x0, xmask)
tmp250 = tl.load(in_ptr51 + x0, xmask)
tmp253 = tl.load(in_ptr52 + x0, xmask)
tmp260 = tl.load(in_out_ptr17 + x0, xmask)
tmp262 = tl.load(in_ptr53 + x0, xmask)
tmp265 = tl.load(in_ptr54 + x0, xmask)
tmp268 = tl.load(in_ptr55 + x0, xmask)
tmp275 = tl.load(in_out_ptr18 + x0, xmask)
tmp277 = tl.load(in_ptr56 + x0, xmask)
tmp280 = tl.load(in_ptr57 + x0, xmask)
tmp283 = tl.load(in_ptr58 + x0, xmask)
tmp290 = tl.load(in_out_ptr19 + x0, xmask)
tmp292 = tl.load(in_ptr59 + x0, xmask)
tmp295 = tl.load(in_ptr60 + x0, xmask)
tmp298 = tl.load(in_ptr61 + x0, xmask)
tmp305 = tl.load(in_out_ptr20 + x0, xmask)
tmp307 = tl.load(in_ptr62 + x0, xmask)
tmp310 = tl.load(in_ptr63 + x0, xmask)
tmp313 = tl.load(in_ptr64 + x0, xmask)
tmp320 = tl.load(in_out_ptr21 + x0, xmask)
tmp322 = tl.load(in_ptr65 + x0, xmask)
tmp325 = tl.load(in_ptr66 + x0, xmask)
tmp328 = tl.load(in_ptr67 + x0, xmask)
tmp335 = tl.load(in_out_ptr22 + x0, xmask)
tmp337 = tl.load(in_ptr68 + x0, xmask)
tmp340 = tl.load(in_ptr69 + x0, xmask)
tmp343 = tl.load(in_ptr70 + x0, xmask)
tmp350 = tl.load(in_out_ptr23 + x0, xmask)
tmp352 = tl.load(in_ptr71 + x0, xmask)
tmp355 = tl.load(in_ptr72 + x0, xmask)
tmp358 = tl.load(in_ptr73 + x0, xmask)
tmp365 = tl.load(in_out_ptr24 + x0, xmask)
tmp367 = tl.load(in_ptr74 + x0, xmask)
tmp370 = tl.load(in_ptr75 + x0, xmask)
tmp373 = tl.load(in_ptr76 + x0, xmask)
tmp380 = tl.load(in_out_ptr25 + x0, xmask)
tmp382 = tl.load(in_ptr77 + x0, xmask)
tmp385 = tl.load(in_ptr78 + x0, xmask)
tmp388 = tl.load(in_ptr79 + x0, xmask)
tmp395 = tl.load(in_out_ptr26 + x0, xmask)
tmp397 = tl.load(in_ptr80 + x0, xmask)
tmp400 = tl.load(in_ptr81 + x0, xmask)
tmp403 = tl.load(in_ptr82 + x0, xmask)
tmp410 = tl.load(in_out_ptr27 + x0, xmask)
tmp412 = tl.load(in_ptr83 + x0, xmask)
tmp415 = tl.load(in_ptr84 + x0, xmask)
tmp418 = tl.load(in_ptr85 + x0, xmask)
tmp425 = tl.load(in_out_ptr28 + x0, xmask)
tmp427 = tl.load(in_ptr86 + x0, xmask)
tmp430 = tl.load(in_ptr87 + x0, xmask)
tmp433 = tl.load(in_ptr88 + x0, xmask)
tmp440 = tl.load(in_out_ptr29 + x0, xmask)
tmp442 = tl.load(in_ptr89 + x0, xmask)
tmp445 = tl.load(in_ptr90 + x0, xmask)
tmp448 = tl.load(in_ptr91 + x0, xmask)
tmp455 = tl.load(in_out_ptr30 + x0, xmask)
tmp457 = tl.load(in_ptr92 + x0, xmask)
tmp460 = tl.load(in_ptr93 + x0, xmask)
tmp463 = tl.load(in_ptr94 + x0, xmask)
tmp470 = tl.load(in_out_ptr31 + x0, xmask)
tmp472 = tl.load(in_ptr95 + x0, xmask)
tmp475 = tl.load(in_ptr96 + x0, xmask)
tmp478 = tl.load(in_ptr97 + x0, xmask)
tmp485 = tl.load(in_out_ptr32 + x0, xmask)
tmp487 = tl.load(in_ptr98 + x0, xmask)
tmp490 = tl.load(in_ptr99 + x0, xmask)
tmp493 = tl.load(in_ptr100 + x0, xmask)
tmp500 = tl.load(in_out_ptr33 + x0, xmask)
tmp502 = tl.load(in_ptr101 + x0, xmask)
tmp505 = tl.load(in_ptr102 + x0, xmask)
tmp508 = tl.load(in_ptr103 + x0, xmask)
tmp515 = tl.load(in_out_ptr34 + x0, xmask)
tmp517 = tl.load(in_ptr104 + x0, xmask)
tmp520 = tl.load(in_ptr105 + x0, xmask)
tmp523 = tl.load(in_ptr106 + x0, xmask)
tmp530 = tl.load(in_out_ptr35 + x0, xmask)
tmp532 = tl.load(in_ptr107 + x0, xmask)
tmp535 = tl.load(in_ptr108 + x0, xmask)
tmp538 = tl.load(in_ptr109 + x0, xmask)
tmp545 = tl.load(in_out_ptr36 + x0, xmask)
tmp547 = tl.load(in_ptr110 + x0, xmask)
tmp550 = tl.load(in_ptr111 + x0, xmask)
tmp553 = tl.load(in_ptr112 + x0, xmask)
tmp560 = tl.load(in_out_ptr37 + x0, xmask)
tmp562 = tl.load(in_ptr113 + x0, xmask)
tmp565 = tl.load(in_ptr114 + x0, xmask)
tmp568 = tl.load(in_ptr115 + x0, xmask)
tmp575 = tl.load(in_out_ptr38 + x0, xmask)
tmp577 = tl.load(in_ptr116 + x0, xmask)
tmp580 = tl.load(in_ptr117 + x0, xmask)
tmp583 = tl.load(in_ptr118 + x0, xmask)
tmp590 = tl.load(in_out_ptr39 + x0, xmask)
tmp592 = tl.load(in_ptr119 + x0, xmask)
tmp595 = tl.load(in_ptr120 + x0, xmask)
tmp598 = tl.load(in_ptr121 + x0, xmask)
tmp605 = tl.load(in_out_ptr40 + x0, xmask)
tmp607 = tl.load(in_ptr122 + x0, xmask)
tmp610 = tl.load(in_ptr123 + x0, xmask)
tmp613 = tl.load(in_ptr124 + x0, xmask)
tmp620 = tl.load(in_out_ptr41 + x0, xmask)
tmp622 = tl.load(in_ptr125 + x0, xmask)
tmp625 = tl.load(in_ptr126 + x0, xmask)
tmp628 = tl.load(in_ptr127 + x0, xmask)
tmp635 = tl.load(in_out_ptr42 + x0, xmask)
tmp637 = tl.load(in_ptr128 + x0, xmask)
tmp640 = tl.load(in_ptr129 + x0, xmask)
tmp643 = tl.load(in_ptr130 + x0, xmask)
tmp650 = tl.load(in_out_ptr43 + x0, xmask)
tmp652 = tl.load(in_ptr131 + x0, xmask)
tmp655 = tl.load(in_ptr132 + x0, xmask)
tmp658 = tl.load(in_ptr133 + x0, xmask)
tmp665 = tl.load(in_out_ptr44 + x0, xmask)
tmp667 = tl.load(in_ptr134 + x0, xmask)
tmp670 = tl.load(in_ptr135 + x0, xmask)
tmp673 = tl.load(in_ptr136 + x0, xmask)
tmp680 = tl.load(in_out_ptr45 + x0, xmask)
tmp682 = tl.load(in_ptr137 + x0, xmask)
tmp685 = tl.load(in_ptr138 + x0, xmask)
tmp688 = tl.load(in_ptr139 + x0, xmask)
tmp695 = tl.load(in_out_ptr46 + x0, xmask)
tmp697 = tl.load(in_ptr140 + x0, xmask)
tmp700 = tl.load(in_ptr141 + x0, xmask)
tmp703 = tl.load(in_ptr142 + x0, xmask)
tmp710 = tl.load(in_out_ptr47 + x0, xmask)
tmp712 = tl.load(in_ptr143 + x0, xmask)
tmp715 = tl.load(in_ptr144 + x0, xmask)
tmp718 = tl.load(in_ptr145 + x0, xmask)
tmp725 = tl.load(in_out_ptr48 + x0, xmask)
tmp727 = tl.load(in_ptr146 + x0, xmask)
tmp730 = tl.load(in_ptr147 + x0, xmask)
tmp733 = tl.load(in_ptr148 + x0, xmask)
tmp740 = tl.load(in_out_ptr49 + x0, xmask)
tmp742 = tl.load(in_ptr149 + x0, xmask)
tmp745 = tl.load(in_ptr150 + x0, xmask)
tmp748 = tl.load(in_ptr151 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = -tmp5
tmp8 = -tmp7
tmp9 = triton_helpers.minimum(tmp6, tmp8)
tmp11 = -tmp10
tmp12 = triton_helpers.minimum(tmp9, tmp11)
tmp14 = tmp12 - tmp13
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp4 * tmp15
tmp17 = tmp15 - tmp16
tmp18 = triton_helpers.maximum(tmp3, tmp17)
tmp19 = tmp4 + tmp18
tmp21 = -tmp20
tmp23 = -tmp22
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp26 = -tmp25
tmp27 = triton_helpers.minimum(tmp24, tmp26)
tmp29 = tmp27 - tmp28
tmp30 = triton_helpers.maximum(tmp3, tmp29)
tmp31 = tmp19 * tmp30
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp3, tmp32)
tmp34 = tmp19 + tmp33
tmp36 = -tmp35
tmp38 = -tmp37
tmp39 = triton_helpers.minimum(tmp36, tmp38)
tmp41 = -tmp40
tmp42 = triton_helpers.minimum(tmp39, tmp41)
tmp44 = tmp42 - tmp43
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp34 * tmp45
tmp47 = tmp45 - tmp46
tmp48 = triton_helpers.maximum(tmp3, tmp47)
tmp49 = tmp34 + tmp48
tmp51 = -tmp50
tmp53 = -tmp52
tmp54 = triton_helpers.minimum(tmp51, tmp53)
tmp56 = -tmp55
tmp57 = triton_helpers.minimum(tmp54, tmp56)
tmp59 = tmp57 - tmp58
tmp60 = triton_helpers.maximum(tmp3, tmp59)
tmp61 = tmp49 * tmp60
tmp62 = tmp60 - tmp61
tmp63 = triton_helpers.maximum(tmp3, tmp62)
tmp64 = tmp49 + tmp63
tmp66 = -tmp65
tmp68 = -tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp71 = -tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp74 = tmp72 - tmp73
tmp75 = triton_helpers.maximum(tmp3, tmp74)
tmp76 = tmp64 * tmp75
tmp77 = tmp75 - tmp76
tmp78 = triton_helpers.maximum(tmp3, tmp77)
tmp79 = tmp64 + tmp78
tmp81 = -tmp80
tmp83 = -tmp82
tmp84 = triton_helpers.minimum(tmp81, tmp83)
tmp86 = -tmp85
tmp87 = triton_helpers.minimum(tmp84, tmp86)
tmp89 = tmp87 - tmp88
tmp90 = triton_helpers.maximum(tmp3, tmp89)
tmp91 = tmp79 * tmp90
tmp92 = tmp90 - tmp91
tmp93 = triton_helpers.maximum(tmp3, tmp92)
tmp94 = tmp79 + tmp93
tmp96 = -tmp95
tmp98 = -tmp97
tmp99 = triton_helpers.minimum(tmp96, tmp98)
tmp101 = -tmp100
tmp102 = triton_helpers.minimum(tmp99, tmp101)
tmp104 = tmp102 - tmp103
tmp105 = triton_helpers.maximum(tmp3, tmp104)
tmp106 = tmp94 * tmp105
tmp107 = tmp105 - tmp106
tmp108 = triton_helpers.maximum(tmp3, tmp107)
tmp109 = tmp94 + tmp108
tmp111 = -tmp110
tmp113 = -tmp112
tmp114 = triton_helpers.minimum(tmp111, tmp113)
tmp116 = -tmp115
tmp117 = triton_helpers.minimum(tmp114, tmp116)
tmp119 = tmp117 - tmp118
tmp120 = triton_helpers.maximum(tmp3, tmp119)
tmp121 = tmp109 * tmp120
tmp122 = tmp120 - tmp121
tmp123 = triton_helpers.maximum(tmp3, tmp122)
tmp124 = tmp109 + tmp123
tmp126 = -tmp125
tmp128 = -tmp127
tmp129 = triton_helpers.minimum(tmp126, tmp128)
tmp131 = -tmp130
tmp132 = triton_helpers.minimum(tmp129, tmp131)
tmp134 = tmp132 - tmp133
tmp135 = triton_helpers.maximum(tmp3, tmp134)
tmp136 = tmp124 * tmp135
tmp137 = tmp135 - tmp136
tmp138 = triton_helpers.maximum(tmp3, tmp137)
tmp139 = tmp124 + tmp138
tmp141 = -tmp140
tmp143 = -tmp142
tmp144 = triton_helpers.minimum(tmp141, tmp143)
tmp146 = -tmp145
tmp147 = triton_helpers.minimum(tmp144, tmp146)
tmp149 = tmp147 - tmp148
tmp150 = triton_helpers.maximum(tmp3, tmp149)
tmp151 = tmp139 * tmp150
tmp152 = tmp150 - tmp151
tmp153 = triton_helpers.maximum(tmp3, tmp152)
tmp154 = tmp139 + tmp153
tmp156 = -tmp155
tmp158 = -tmp157
tmp159 = triton_helpers.minimum(tmp156, tmp158)
tmp161 = -tmp160
tmp162 = triton_helpers.minimum(tmp159, tmp161)
tmp164 = tmp162 - tmp163
tmp165 = triton_helpers.maximum(tmp3, tmp164)
tmp166 = tmp154 * tmp165
tmp167 = tmp165 - tmp166
tmp168 = triton_helpers.maximum(tmp3, tmp167)
tmp169 = tmp154 + tmp168
tmp171 = -tmp170
tmp173 = -tmp172
tmp174 = triton_helpers.minimum(tmp171, tmp173)
tmp176 = -tmp175
tmp177 = triton_helpers.minimum(tmp174, tmp176)
tmp179 = tmp177 - tmp178
tmp180 = triton_helpers.maximum(tmp3, tmp179)
tmp181 = tmp169 * tmp180
tmp182 = tmp180 - tmp181
tmp183 = triton_helpers.maximum(tmp3, tmp182)
tmp184 = tmp169 + tmp183
tmp186 = -tmp185
tmp188 = -tmp187
tmp189 = triton_helpers.minimum(tmp186, tmp188)
tmp191 = -tmp190
tmp192 = triton_helpers.minimum(tmp189, tmp191)
tmp194 = tmp192 - tmp193
tmp195 = triton_helpers.maximum(tmp3, tmp194)
tmp196 = tmp184 * tmp195
tmp197 = tmp195 - tmp196
tmp198 = triton_helpers.maximum(tmp3, tmp197)
tmp199 = tmp184 + tmp198
tmp201 = -tmp200
tmp203 = -tmp202
tmp204 = triton_helpers.minimum(tmp201, tmp203)
tmp206 = -tmp205
tmp207 = triton_helpers.minimum(tmp204, tmp206)
tmp209 = tmp207 - tmp208
tmp210 = triton_helpers.maximum(tmp3, tmp209)
tmp211 = tmp199 * tmp210
tmp212 = tmp210 - tmp211
tmp213 = triton_helpers.maximum(tmp3, tmp212)
tmp214 = tmp199 + tmp213
tmp216 = -tmp215
tmp218 = -tmp217
tmp219 = triton_helpers.minimum(tmp216, tmp218)
tmp221 = -tmp220
tmp222 = triton_helpers.minimum(tmp219, tmp221)
tmp224 = tmp222 - tmp223
tmp225 = triton_helpers.maximum(tmp3, tmp224)
tmp226 = tmp214 * tmp225
tmp227 = tmp225 - tmp226
tmp228 = triton_helpers.maximum(tmp3, tmp227)
tmp229 = tmp214 + tmp228
tmp231 = -tmp230
tmp233 = -tmp232
tmp234 = triton_helpers.minimum(tmp231, tmp233)
tmp236 = -tmp235
tmp237 = triton_helpers.minimum(tmp234, tmp236)
tmp239 = tmp237 - tmp238
tmp240 = triton_helpers.maximum(tmp3, tmp239)
tmp241 = tmp229 * tmp240
tmp242 = tmp240 - tmp241
tmp243 = triton_helpers.maximum(tmp3, tmp242)
tmp244 = tmp229 + tmp243
tmp246 = -tmp245
tmp248 = -tmp247
tmp249 = triton_helpers.minimum(tmp246, tmp248)
tmp251 = -tmp250
tmp252 = triton_helpers.minimum(tmp249, tmp251)
tmp254 = tmp252 - tmp253
tmp255 = triton_helpers.maximum(tmp3, tmp254)
tmp256 = tmp244 * tmp255
tmp257 = tmp255 - tmp256
tmp258 = triton_helpers.maximum(tmp3, tmp257)
tmp259 = tmp244 + tmp258
tmp261 = -tmp260
tmp263 = -tmp262
tmp264 = triton_helpers.minimum(tmp261, tmp263)
tmp266 = -tmp265
tmp267 = triton_helpers.minimum(tmp264, tmp266)
tmp269 = tmp267 - tmp268
tmp270 = triton_helpers.maximum(tmp3, tmp269)
tmp271 = tmp259 * tmp270
tmp272 = tmp270 - tmp271
tmp273 = triton_helpers.maximum(tmp3, tmp272)
tmp274 = tmp259 + tmp273
tmp276 = -tmp275
tmp278 = -tmp277
tmp279 = triton_helpers.minimum(tmp276, tmp278)
tmp281 = -tmp280
tmp282 = triton_helpers.minimum(tmp279, tmp281)
tmp284 = tmp282 - tmp283
tmp285 = triton_helpers.maximum(tmp3, tmp284)
tmp286 = tmp274 * tmp285
tmp287 = tmp285 - tmp286
tmp288 = triton_helpers.maximum(tmp3, tmp287)
tmp289 = tmp274 + tmp288
tmp291 = -tmp290
tmp293 = -tmp292
tmp294 = triton_helpers.minimum(tmp291, tmp293)
tmp296 = -tmp295
tmp297 = triton_helpers.minimum(tmp294, tmp296)
tmp299 = tmp297 - tmp298
tmp300 = triton_helpers.maximum(tmp3, tmp299)
tmp301 = tmp289 * tmp300
tmp302 = tmp300 - tmp301
tmp303 = triton_helpers.maximum(tmp3, tmp302)
tmp304 = tmp289 + tmp303
tmp306 = -tmp305
tmp308 = -tmp307
tmp309 = triton_helpers.minimum(tmp306, tmp308)
tmp311 = -tmp310
tmp312 = triton_helpers.minimum(tmp309, tmp311)
tmp314 = tmp312 - tmp313
tmp315 = triton_helpers.maximum(tmp3, tmp314)
tmp316 = tmp304 * tmp315
tmp317 = tmp315 - tmp316
tmp318 = triton_helpers.maximum(tmp3, tmp317)
tmp319 = tmp304 + tmp318
tmp321 = -tmp320
tmp323 = -tmp322
tmp324 = triton_helpers.minimum(tmp321, tmp323)
tmp326 = -tmp325
tmp327 = triton_helpers.minimum(tmp324, tmp326)
tmp329 = tmp327 - tmp328
tmp330 = triton_helpers.maximum(tmp3, tmp329)
tmp331 = tmp319 * tmp330
tmp332 = tmp330 - tmp331
tmp333 = triton_helpers.maximum(tmp3, tmp332)
tmp334 = tmp319 + tmp333
tmp336 = -tmp335
tmp338 = -tmp337
tmp339 = triton_helpers.minimum(tmp336, tmp338)
tmp341 = -tmp340
tmp342 = triton_helpers.minimum(tmp339, tmp341)
tmp344 = tmp342 - tmp343
tmp345 = triton_helpers.maximum(tmp3, tmp344)
tmp346 = tmp334 * tmp345
tmp347 = tmp345 - tmp346
tmp348 = triton_helpers.maximum(tmp3, tmp347)
tmp349 = tmp334 + tmp348
tmp351 = -tmp350
tmp353 = -tmp352
tmp354 = triton_helpers.minimum(tmp351, tmp353)
tmp356 = -tmp355
tmp357 = triton_helpers.minimum(tmp354, tmp356)
tmp359 = tmp357 - tmp358
tmp360 = triton_helpers.maximum(tmp3, tmp359)
tmp361 = tmp349 * tmp360
tmp362 = tmp360 - tmp361
tmp363 = triton_helpers.maximum(tmp3, tmp362)
tmp364 = tmp349 + tmp363
tmp366 = -tmp365
tmp368 = -tmp367
tmp369 = triton_helpers.minimum(tmp366, tmp368)
tmp371 = -tmp370
tmp372 = triton_helpers.minimum(tmp369, tmp371)
tmp374 = tmp372 - tmp373
tmp375 = triton_helpers.maximum(tmp3, tmp374)
tmp376 = tmp364 * tmp375
tmp377 = tmp375 - tmp376
tmp378 = triton_helpers.maximum(tmp3, tmp377)
tmp379 = tmp364 + tmp378
tmp381 = -tmp380
tmp383 = -tmp382
tmp384 = triton_helpers.minimum(tmp381, tmp383)
tmp386 = -tmp385
tmp387 = triton_helpers.minimum(tmp384, tmp386)
tmp389 = tmp387 - tmp388
tmp390 = triton_helpers.maximum(tmp3, tmp389)
tmp391 = tmp379 * tmp390
tmp392 = tmp390 - tmp391
tmp393 = triton_helpers.maximum(tmp3, tmp392)
tmp394 = tmp379 + tmp393
tmp396 = -tmp395
tmp398 = -tmp397
tmp399 = triton_helpers.minimum(tmp396, tmp398)
tmp401 = -tmp400
tmp402 = triton_helpers.minimum(tmp399, tmp401)
tmp404 = tmp402 - tmp403
tmp405 = triton_helpers.maximum(tmp3, tmp404)
tmp406 = tmp394 * tmp405
tmp407 = tmp405 - tmp406
tmp408 = triton_helpers.maximum(tmp3, tmp407)
tmp409 = tmp394 + tmp408
tmp411 = -tmp410
tmp413 = -tmp412
tmp414 = triton_helpers.minimum(tmp411, tmp413)
tmp416 = -tmp415
tmp417 = triton_helpers.minimum(tmp414, tmp416)
tmp419 = tmp417 - tmp418
tmp420 = triton_helpers.maximum(tmp3, tmp419)
tmp421 = tmp409 * tmp420
tmp422 = tmp420 - tmp421
tmp423 = triton_helpers.maximum(tmp3, tmp422)
tmp424 = tmp409 + tmp423
tmp426 = -tmp425
tmp428 = -tmp427
tmp429 = triton_helpers.minimum(tmp426, tmp428)
tmp431 = -tmp430
tmp432 = triton_helpers.minimum(tmp429, tmp431)
tmp434 = tmp432 - tmp433
tmp435 = triton_helpers.maximum(tmp3, tmp434)
tmp436 = tmp424 * tmp435
tmp437 = tmp435 - tmp436
tmp438 = triton_helpers.maximum(tmp3, tmp437)
tmp439 = tmp424 + tmp438
tmp441 = -tmp440
tmp443 = -tmp442
tmp444 = triton_helpers.minimum(tmp441, tmp443)
tmp446 = -tmp445
tmp447 = triton_helpers.minimum(tmp444, tmp446)
tmp449 = tmp447 - tmp448
tmp450 = triton_helpers.maximum(tmp3, tmp449)
tmp451 = tmp439 * tmp450
tmp452 = tmp450 - tmp451
tmp453 = triton_helpers.maximum(tmp3, tmp452)
tmp454 = tmp439 + tmp453
tmp456 = -tmp455
tmp458 = -tmp457
tmp459 = triton_helpers.minimum(tmp456, tmp458)
tmp461 = -tmp460
tmp462 = triton_helpers.minimum(tmp459, tmp461)
tmp464 = tmp462 - tmp463
tmp465 = triton_helpers.maximum(tmp3, tmp464)
tmp466 = tmp454 * tmp465
tmp467 = tmp465 - tmp466
tmp468 = triton_helpers.maximum(tmp3, tmp467)
tmp469 = tmp454 + tmp468
tmp471 = -tmp470
tmp473 = -tmp472
tmp474 = triton_helpers.minimum(tmp471, tmp473)
tmp476 = -tmp475
tmp477 = triton_helpers.minimum(tmp474, tmp476)
tmp479 = tmp477 - tmp478
tmp480 = triton_helpers.maximum(tmp3, tmp479)
tmp481 = tmp469 * tmp480
tmp482 = tmp480 - tmp481
tmp483 = triton_helpers.maximum(tmp3, tmp482)
tmp484 = tmp469 + tmp483
tmp486 = -tmp485
tmp488 = -tmp487
tmp489 = triton_helpers.minimum(tmp486, tmp488)
tmp491 = -tmp490
tmp492 = triton_helpers.minimum(tmp489, tmp491)
tmp494 = tmp492 - tmp493
tmp495 = triton_helpers.maximum(tmp3, tmp494)
tmp496 = tmp484 * tmp495
tmp497 = tmp495 - tmp496
tmp498 = triton_helpers.maximum(tmp3, tmp497)
tmp499 = tmp484 + tmp498
tmp501 = -tmp500
tmp503 = -tmp502
tmp504 = triton_helpers.minimum(tmp501, tmp503)
tmp506 = -tmp505
tmp507 = triton_helpers.minimum(tmp504, tmp506)
tmp509 = tmp507 - tmp508
tmp510 = triton_helpers.maximum(tmp3, tmp509)
tmp511 = tmp499 * tmp510
tmp512 = tmp510 - tmp511
tmp513 = triton_helpers.maximum(tmp3, tmp512)
tmp514 = tmp499 + tmp513
tmp516 = -tmp515
tmp518 = -tmp517
tmp519 = triton_helpers.minimum(tmp516, tmp518)
tmp521 = -tmp520
tmp522 = triton_helpers.minimum(tmp519, tmp521)
tmp524 = tmp522 - tmp523
tmp525 = triton_helpers.maximum(tmp3, tmp524)
tmp526 = tmp514 * tmp525
tmp527 = tmp525 - tmp526
tmp528 = triton_helpers.maximum(tmp3, tmp527)
tmp529 = tmp514 + tmp528
tmp531 = -tmp530
tmp533 = -tmp532
tmp534 = triton_helpers.minimum(tmp531, tmp533)
tmp536 = -tmp535
tmp537 = triton_helpers.minimum(tmp534, tmp536)
tmp539 = tmp537 - tmp538
tmp540 = triton_helpers.maximum(tmp3, tmp539)
tmp541 = tmp529 * tmp540
tmp542 = tmp540 - tmp541
tmp543 = triton_helpers.maximum(tmp3, tmp542)
tmp544 = tmp529 + tmp543
tmp546 = -tmp545
tmp548 = -tmp547
tmp549 = triton_helpers.minimum(tmp546, tmp548)
tmp551 = -tmp550
tmp552 = triton_helpers.minimum(tmp549, tmp551)
tmp554 = tmp552 - tmp553
tmp555 = triton_helpers.maximum(tmp3, tmp554)
tmp556 = tmp544 * tmp555
tmp557 = tmp555 - tmp556
tmp558 = triton_helpers.maximum(tmp3, tmp557)
tmp559 = tmp544 + tmp558
tmp561 = -tmp560
tmp563 = -tmp562
tmp564 = triton_helpers.minimum(tmp561, tmp563)
tmp566 = -tmp565
tmp567 = triton_helpers.minimum(tmp564, tmp566)
tmp569 = tmp567 - tmp568
tmp570 = triton_helpers.maximum(tmp3, tmp569)
tmp571 = tmp559 * tmp570
tmp572 = tmp570 - tmp571
tmp573 = triton_helpers.maximum(tmp3, tmp572)
tmp574 = tmp559 + tmp573
tmp576 = -tmp575
tmp578 = -tmp577
tmp579 = triton_helpers.minimum(tmp576, tmp578)
tmp581 = -tmp580
tmp582 = triton_helpers.minimum(tmp579, tmp581)
tmp584 = tmp582 - tmp583
tmp585 = triton_helpers.maximum(tmp3, tmp584)
tmp586 = tmp574 * tmp585
tmp587 = tmp585 - tmp586
tmp588 = triton_helpers.maximum(tmp3, tmp587)
tmp589 = tmp574 + tmp588
tmp591 = -tmp590
tmp593 = -tmp592
tmp594 = triton_helpers.minimum(tmp591, tmp593)
tmp596 = -tmp595
tmp597 = triton_helpers.minimum(tmp594, tmp596)
tmp599 = tmp597 - tmp598
tmp600 = triton_helpers.maximum(tmp3, tmp599)
tmp601 = tmp589 * tmp600
tmp602 = tmp600 - tmp601
tmp603 = triton_helpers.maximum(tmp3, tmp602)
tmp604 = tmp589 + tmp603
tmp606 = -tmp605
tmp608 = -tmp607
tmp609 = triton_helpers.minimum(tmp606, tmp608)
tmp611 = -tmp610
tmp612 = triton_helpers.minimum(tmp609, tmp611)
tmp614 = tmp612 - tmp613
tmp615 = triton_helpers.maximum(tmp3, tmp614)
tmp616 = tmp604 * tmp615
tmp617 = tmp615 - tmp616
tmp618 = triton_helpers.maximum(tmp3, tmp617)
tmp619 = tmp604 + tmp618
tmp621 = -tmp620
tmp623 = -tmp622
tmp624 = triton_helpers.minimum(tmp621, tmp623)
tmp626 = -tmp625
tmp627 = triton_helpers.minimum(tmp624, tmp626)
tmp629 = tmp627 - tmp628
tmp630 = triton_helpers.maximum(tmp3, tmp629)
tmp631 = tmp619 * tmp630
tmp632 = tmp630 - tmp631
tmp633 = triton_helpers.maximum(tmp3, tmp632)
tmp634 = tmp619 + tmp633
tmp636 = -tmp635
tmp638 = -tmp637
tmp639 = triton_helpers.minimum(tmp636, tmp638)
tmp641 = -tmp640
tmp642 = triton_helpers.minimum(tmp639, tmp641)
tmp644 = tmp642 - tmp643
tmp645 = triton_helpers.maximum(tmp3, tmp644)
tmp646 = tmp634 * tmp645
tmp647 = tmp645 - tmp646
tmp648 = triton_helpers.maximum(tmp3, tmp647)
tmp649 = tmp634 + tmp648
tmp651 = -tmp650
tmp653 = -tmp652
tmp654 = triton_helpers.minimum(tmp651, tmp653)
tmp656 = -tmp655
tmp657 = triton_helpers.minimum(tmp654, tmp656)
tmp659 = tmp657 - tmp658
tmp660 = triton_helpers.maximum(tmp3, tmp659)
tmp661 = tmp649 * tmp660
tmp662 = tmp660 - tmp661
tmp663 = triton_helpers.maximum(tmp3, tmp662)
tmp664 = tmp649 + tmp663
tmp666 = -tmp665
tmp668 = -tmp667
tmp669 = triton_helpers.minimum(tmp666, tmp668)
tmp671 = -tmp670
tmp672 = triton_helpers.minimum(tmp669, tmp671)
tmp674 = tmp672 - tmp673
tmp675 = triton_helpers.maximum(tmp3, tmp674)
tmp676 = tmp664 * tmp675
tmp677 = tmp675 - tmp676
tmp678 = triton_helpers.maximum(tmp3, tmp677)
tmp679 = tmp664 + tmp678
tmp681 = -tmp680
tmp683 = -tmp682
tmp684 = triton_helpers.minimum(tmp681, tmp683)
tmp686 = -tmp685
tmp687 = triton_helpers.minimum(tmp684, tmp686)
tmp689 = tmp687 - tmp688
tmp690 = triton_helpers.maximum(tmp3, tmp689)
tmp691 = tmp679 * tmp690
tmp692 = tmp690 - tmp691
tmp693 = triton_helpers.maximum(tmp3, tmp692)
tmp694 = tmp679 + tmp693
tmp696 = -tmp695
tmp698 = -tmp697
tmp699 = triton_helpers.minimum(tmp696, tmp698)
tmp701 = -tmp700
tmp702 = triton_helpers.minimum(tmp699, tmp701)
tmp704 = tmp702 - tmp703
tmp705 = triton_helpers.maximum(tmp3, tmp704)
tmp706 = tmp694 * tmp705
tmp707 = tmp705 - tmp706
tmp708 = triton_helpers.maximum(tmp3, tmp707)
tmp709 = tmp694 + tmp708
tmp711 = -tmp710
tmp713 = -tmp712
tmp714 = triton_helpers.minimum(tmp711, tmp713)
tmp716 = -tmp715
tmp717 = triton_helpers.minimum(tmp714, tmp716)
tmp719 = tmp717 - tmp718
tmp720 = triton_helpers.maximum(tmp3, tmp719)
tmp721 = tmp709 * tmp720
tmp722 = tmp720 - tmp721
tmp723 = triton_helpers.maximum(tmp3, tmp722)
tmp724 = tmp709 + tmp723
tmp726 = -tmp725
tmp728 = -tmp727
tmp729 = triton_helpers.minimum(tmp726, tmp728)
tmp731 = -tmp730
tmp732 = triton_helpers.minimum(tmp729, tmp731)
tmp734 = tmp732 - tmp733
tmp735 = triton_helpers.maximum(tmp3, tmp734)
tmp736 = tmp724 * tmp735
tmp737 = tmp735 - tmp736
tmp738 = triton_helpers.maximum(tmp3, tmp737)
tmp739 = tmp724 + tmp738
tmp741 = -tmp740
tmp743 = -tmp742
tmp744 = triton_helpers.minimum(tmp741, tmp743)
tmp746 = -tmp745
tmp747 = triton_helpers.minimum(tmp744, tmp746)
tmp749 = tmp747 - tmp748
tmp750 = triton_helpers.maximum(tmp3, tmp749)
tmp751 = tmp739 * tmp750
tmp752 = tmp750 - tmp751
tmp753 = triton_helpers.maximum(tmp3, tmp752)
tmp754 = tmp739 + tmp753
tl.store(in_out_ptr49 + x0, tmp754, xmask)
@triton.jit
def triton_poi_fused_add_minimum_mul_neg_relu_sub_6(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5,
in_out_ptr6, in_out_ptr7, in_out_ptr8, in_out_ptr9, in_out_ptr10,
in_out_ptr11, in_out_ptr12, in_out_ptr13, in_out_ptr14, in_out_ptr15,
in_out_ptr16, in_out_ptr17, in_out_ptr18, in_out_ptr19, in_out_ptr20,
in_out_ptr21, in_out_ptr22, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18,
in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25,
in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32,
in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39,
in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46,
in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53,
in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60,
in_ptr61, in_ptr62, in_ptr63, in_ptr64, in_ptr65, in_ptr66, in_ptr67,
in_ptr68, in_ptr69, in_ptr70, in_ptr71, in_ptr72, in_ptr73, in_ptr74,
in_ptr75, in_ptr76, in_ptr77, in_ptr78, in_ptr79, in_ptr80, in_ptr81,
in_ptr82, in_ptr83, in_ptr84, in_ptr85, in_ptr86, in_ptr87, in_ptr88,
in_ptr89, in_ptr90, in_ptr91, in_ptr92, in_ptr93, in_ptr94, in_ptr95,
in_ptr96, in_ptr97, in_ptr98, in_ptr99, in_ptr100, in_ptr101, in_ptr102,
in_ptr103, in_ptr104, in_ptr105, in_ptr106, in_ptr107, in_ptr108,
in_ptr109, in_ptr110, in_ptr111, in_ptr112, in_ptr113, in_ptr114,
in_ptr115, in_ptr116, in_ptr117, in_ptr118, in_ptr119, in_ptr120,
in_ptr121, in_ptr122, in_ptr123, in_ptr124, in_ptr125, in_ptr126,
in_ptr127, in_ptr128, in_ptr129, in_ptr130, in_ptr131, in_ptr132,
in_ptr133, in_ptr134, in_ptr135, in_ptr136, in_ptr137, in_ptr138,
in_ptr139, in_ptr140, in_ptr141, in_ptr142, in_ptr143, in_ptr144,
in_ptr145, in_ptr146, in_ptr147, in_ptr148, in_ptr149, in_ptr150,
in_ptr151, in_ptr152, in_ptr153, in_ptr154, in_ptr155, in_ptr156,
in_ptr157, in_ptr158, in_ptr159, in_ptr160, in_ptr161, in_ptr162,
in_ptr163, in_ptr164, in_ptr165, in_ptr166, in_ptr167, in_ptr168,
in_ptr169, in_ptr170, in_ptr171, in_ptr172, in_ptr173, in_ptr174,
in_ptr175, in_ptr176, in_ptr177, in_ptr178, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp10 = tl.load(in_ptr3 + x0, xmask)
tmp13 = tl.load(in_ptr4 + x0, xmask)
tmp20 = tl.load(in_ptr5 + x0, xmask)
tmp22 = tl.load(in_ptr6 + x0, xmask)
tmp25 = tl.load(in_ptr7 + x0, xmask)
tmp28 = tl.load(in_ptr8 + x0, xmask)
tmp35 = tl.load(in_ptr9 + x0, xmask)
tmp37 = tl.load(in_ptr10 + x0, xmask)
tmp40 = tl.load(in_ptr11 + x0, xmask)
tmp43 = tl.load(in_ptr12 + x0, xmask)
tmp50 = tl.load(in_out_ptr1 + x0, xmask)
tmp52 = tl.load(in_ptr13 + x0, xmask)
tmp55 = tl.load(in_ptr14 + x0, xmask)
tmp58 = tl.load(in_ptr15 + x0, xmask)
tmp65 = tl.load(in_out_ptr2 + x0, xmask)
tmp67 = tl.load(in_ptr16 + x0, xmask)
tmp70 = tl.load(in_ptr17 + x0, xmask)
tmp73 = tl.load(in_ptr18 + x0, xmask)
tmp80 = tl.load(in_out_ptr3 + x0, xmask)
tmp82 = tl.load(in_ptr19 + x0, xmask)
tmp85 = tl.load(in_ptr20 + x0, xmask)
tmp88 = tl.load(in_ptr21 + x0, xmask)
tmp95 = tl.load(in_out_ptr4 + x0, xmask)
tmp97 = tl.load(in_ptr22 + x0, xmask)
tmp100 = tl.load(in_ptr23 + x0, xmask)
tmp103 = tl.load(in_ptr24 + x0, xmask)
tmp110 = tl.load(in_out_ptr5 + x0, xmask)
tmp112 = tl.load(in_ptr25 + x0, xmask)
tmp115 = tl.load(in_ptr26 + x0, xmask)
tmp118 = tl.load(in_ptr27 + x0, xmask)
tmp125 = tl.load(in_out_ptr6 + x0, xmask)
tmp127 = tl.load(in_ptr28 + x0, xmask)
tmp130 = tl.load(in_ptr29 + x0, xmask)
tmp133 = tl.load(in_ptr30 + x0, xmask)
tmp140 = tl.load(in_out_ptr7 + x0, xmask)
tmp142 = tl.load(in_ptr31 + x0, xmask)
tmp145 = tl.load(in_ptr32 + x0, xmask)
tmp148 = tl.load(in_ptr33 + x0, xmask)
tmp155 = tl.load(in_ptr34 + x0, xmask)
tmp157 = tl.load(in_ptr35 + x0, xmask)
tmp160 = tl.load(in_ptr36 + x0, xmask)
tmp163 = tl.load(in_ptr37 + x0, xmask)
tmp170 = tl.load(in_ptr38 + x0, xmask)
tmp172 = tl.load(in_ptr39 + x0, xmask)
tmp175 = tl.load(in_ptr40 + x0, xmask)
tmp178 = tl.load(in_ptr41 + x0, xmask)
tmp185 = tl.load(in_ptr42 + x0, xmask)
tmp187 = tl.load(in_ptr43 + x0, xmask)
tmp190 = tl.load(in_ptr44 + x0, xmask)
tmp193 = tl.load(in_ptr45 + x0, xmask)
tmp200 = tl.load(in_ptr46 + x0, xmask)
tmp202 = tl.load(in_ptr47 + x0, xmask)
tmp205 = tl.load(in_ptr48 + x0, xmask)
tmp208 = tl.load(in_ptr49 + x0, xmask)
tmp215 = tl.load(in_ptr50 + x0, xmask)
tmp217 = tl.load(in_ptr51 + x0, xmask)
tmp220 = tl.load(in_ptr52 + x0, xmask)
tmp223 = tl.load(in_ptr53 + x0, xmask)
tmp230 = tl.load(in_ptr54 + x0, xmask)
tmp232 = tl.load(in_ptr55 + x0, xmask)
tmp235 = tl.load(in_ptr56 + x0, xmask)
tmp238 = tl.load(in_ptr57 + x0, xmask)
tmp245 = tl.load(in_ptr58 + x0, xmask)
tmp247 = tl.load(in_ptr59 + x0, xmask)
tmp250 = tl.load(in_ptr60 + x0, xmask)
tmp253 = tl.load(in_ptr61 + x0, xmask)
tmp260 = tl.load(in_ptr62 + x0, xmask)
tmp262 = tl.load(in_ptr63 + x0, xmask)
tmp265 = tl.load(in_ptr64 + x0, xmask)
tmp268 = tl.load(in_ptr65 + x0, xmask)
tmp275 = tl.load(in_ptr66 + x0, xmask)
tmp277 = tl.load(in_ptr67 + x0, xmask)
tmp280 = tl.load(in_ptr68 + x0, xmask)
tmp283 = tl.load(in_ptr69 + x0, xmask)
tmp290 = tl.load(in_ptr70 + x0, xmask)
tmp292 = tl.load(in_ptr71 + x0, xmask)
tmp295 = tl.load(in_ptr72 + x0, xmask)
tmp298 = tl.load(in_ptr73 + x0, xmask)
tmp305 = tl.load(in_ptr74 + x0, xmask)
tmp307 = tl.load(in_ptr75 + x0, xmask)
tmp310 = tl.load(in_ptr76 + x0, xmask)
tmp313 = tl.load(in_ptr77 + x0, xmask)
tmp320 = tl.load(in_ptr78 + x0, xmask)
tmp322 = tl.load(in_ptr79 + x0, xmask)
tmp325 = tl.load(in_ptr80 + x0, xmask)
tmp328 = tl.load(in_ptr81 + x0, xmask)
tmp335 = tl.load(in_ptr82 + x0, xmask)
tmp337 = tl.load(in_ptr83 + x0, xmask)
tmp340 = tl.load(in_ptr84 + x0, xmask)
tmp343 = tl.load(in_ptr85 + x0, xmask)
tmp350 = tl.load(in_ptr86 + x0, xmask)
tmp352 = tl.load(in_ptr87 + x0, xmask)
tmp355 = tl.load(in_ptr88 + x0, xmask)
tmp358 = tl.load(in_ptr89 + x0, xmask)
tmp365 = tl.load(in_ptr90 + x0, xmask)
tmp367 = tl.load(in_ptr91 + x0, xmask)
tmp370 = tl.load(in_ptr92 + x0, xmask)
tmp373 = tl.load(in_ptr93 + x0, xmask)
tmp380 = tl.load(in_ptr94 + x0, xmask)
tmp382 = tl.load(in_ptr95 + x0, xmask)
tmp385 = tl.load(in_ptr96 + x0, xmask)
tmp388 = tl.load(in_ptr97 + x0, xmask)
tmp395 = tl.load(in_ptr98 + x0, xmask)
tmp397 = tl.load(in_ptr99 + x0, xmask)
tmp400 = tl.load(in_ptr100 + x0, xmask)
tmp403 = tl.load(in_ptr101 + x0, xmask)
tmp410 = tl.load(in_ptr102 + x0, xmask)
tmp412 = tl.load(in_ptr103 + x0, xmask)
tmp415 = tl.load(in_ptr104 + x0, xmask)
tmp418 = tl.load(in_ptr105 + x0, xmask)
tmp425 = tl.load(in_ptr106 + x0, xmask)
tmp427 = tl.load(in_ptr107 + x0, xmask)
tmp430 = tl.load(in_ptr108 + x0, xmask)
tmp433 = tl.load(in_ptr109 + x0, xmask)
tmp440 = tl.load(in_ptr110 + x0, xmask)
tmp442 = tl.load(in_ptr111 + x0, xmask)
tmp445 = tl.load(in_ptr112 + x0, xmask)
tmp448 = tl.load(in_ptr113 + x0, xmask)
tmp455 = tl.load(in_ptr114 + x0, xmask)
tmp457 = tl.load(in_ptr115 + x0, xmask)
tmp460 = tl.load(in_ptr116 + x0, xmask)
tmp463 = tl.load(in_ptr117 + x0, xmask)
tmp470 = tl.load(in_ptr118 + x0, xmask)
tmp472 = tl.load(in_ptr119 + x0, xmask)
tmp475 = tl.load(in_ptr120 + x0, xmask)
tmp478 = tl.load(in_ptr121 + x0, xmask)
tmp485 = tl.load(in_ptr122 + x0, xmask)
tmp487 = tl.load(in_ptr123 + x0, xmask)
tmp490 = tl.load(in_ptr124 + x0, xmask)
tmp493 = tl.load(in_ptr125 + x0, xmask)
tmp500 = tl.load(in_ptr126 + x0, xmask)
tmp502 = tl.load(in_ptr127 + x0, xmask)
tmp505 = tl.load(in_ptr128 + x0, xmask)
tmp508 = tl.load(in_ptr129 + x0, xmask)
tmp515 = tl.load(in_ptr130 + x0, xmask)
tmp517 = tl.load(in_ptr131 + x0, xmask)
tmp520 = tl.load(in_ptr132 + x0, xmask)
tmp523 = tl.load(in_ptr133 + x0, xmask)
tmp530 = tl.load(in_ptr134 + x0, xmask)
tmp532 = tl.load(in_ptr135 + x0, xmask)
tmp535 = tl.load(in_out_ptr8 + x0, xmask)
tmp538 = tl.load(in_ptr136 + x0, xmask)
tmp545 = tl.load(in_out_ptr9 + x0, xmask)
tmp547 = tl.load(in_ptr137 + x0, xmask)
tmp550 = tl.load(in_ptr138 + x0, xmask)
tmp553 = tl.load(in_ptr139 + x0, xmask)
tmp560 = tl.load(in_out_ptr10 + x0, xmask)
tmp562 = tl.load(in_ptr140 + x0, xmask)
tmp565 = tl.load(in_ptr141 + x0, xmask)
tmp568 = tl.load(in_ptr142 + x0, xmask)
tmp575 = tl.load(in_out_ptr11 + x0, xmask)
tmp577 = tl.load(in_ptr143 + x0, xmask)
tmp580 = tl.load(in_ptr144 + x0, xmask)
tmp583 = tl.load(in_ptr145 + x0, xmask)
tmp590 = tl.load(in_out_ptr12 + x0, xmask)
tmp592 = tl.load(in_ptr146 + x0, xmask)
tmp595 = tl.load(in_ptr147 + x0, xmask)
tmp598 = tl.load(in_ptr148 + x0, xmask)
tmp605 = tl.load(in_out_ptr13 + x0, xmask)
tmp607 = tl.load(in_ptr149 + x0, xmask)
tmp610 = tl.load(in_ptr150 + x0, xmask)
tmp613 = tl.load(in_ptr151 + x0, xmask)
tmp620 = tl.load(in_out_ptr14 + x0, xmask)
tmp622 = tl.load(in_ptr152 + x0, xmask)
tmp625 = tl.load(in_ptr153 + x0, xmask)
tmp628 = tl.load(in_ptr154 + x0, xmask)
tmp635 = tl.load(in_out_ptr15 + x0, xmask)
tmp637 = tl.load(in_ptr155 + x0, xmask)
tmp640 = tl.load(in_ptr156 + x0, xmask)
tmp643 = tl.load(in_ptr157 + x0, xmask)
tmp650 = tl.load(in_out_ptr16 + x0, xmask)
tmp652 = tl.load(in_ptr158 + x0, xmask)
tmp655 = tl.load(in_ptr159 + x0, xmask)
tmp658 = tl.load(in_ptr160 + x0, xmask)
tmp665 = tl.load(in_out_ptr17 + x0, xmask)
tmp667 = tl.load(in_ptr161 + x0, xmask)
tmp670 = tl.load(in_ptr162 + x0, xmask)
tmp673 = tl.load(in_ptr163 + x0, xmask)
tmp680 = tl.load(in_out_ptr18 + x0, xmask)
tmp682 = tl.load(in_ptr164 + x0, xmask)
tmp685 = tl.load(in_ptr165 + x0, xmask)
tmp688 = tl.load(in_ptr166 + x0, xmask)
tmp695 = tl.load(in_out_ptr19 + x0, xmask)
tmp697 = tl.load(in_ptr167 + x0, xmask)
tmp700 = tl.load(in_ptr168 + x0, xmask)
tmp703 = tl.load(in_ptr169 + x0, xmask)
tmp710 = tl.load(in_out_ptr20 + x0, xmask)
tmp712 = tl.load(in_ptr170 + x0, xmask)
tmp715 = tl.load(in_ptr171 + x0, xmask)
tmp718 = tl.load(in_ptr172 + x0, xmask)
tmp725 = tl.load(in_out_ptr21 + x0, xmask)
tmp727 = tl.load(in_ptr173 + x0, xmask)
tmp730 = tl.load(in_ptr174 + x0, xmask)
tmp733 = tl.load(in_ptr175 + x0, xmask)
tmp740 = tl.load(in_out_ptr22 + x0, xmask)
tmp742 = tl.load(in_ptr176 + x0, xmask)
tmp745 = tl.load(in_ptr177 + x0, xmask)
tmp748 = tl.load(in_ptr178 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = -tmp5
tmp8 = -tmp7
tmp9 = triton_helpers.minimum(tmp6, tmp8)
tmp11 = -tmp10
tmp12 = triton_helpers.minimum(tmp9, tmp11)
tmp14 = tmp12 - tmp13
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp4 * tmp15
tmp17 = tmp15 - tmp16
tmp18 = triton_helpers.maximum(tmp3, tmp17)
tmp19 = tmp4 + tmp18
tmp21 = -tmp20
tmp23 = -tmp22
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp26 = -tmp25
tmp27 = triton_helpers.minimum(tmp24, tmp26)
tmp29 = tmp27 - tmp28
tmp30 = triton_helpers.maximum(tmp3, tmp29)
tmp31 = tmp19 * tmp30
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp3, tmp32)
tmp34 = tmp19 + tmp33
tmp36 = -tmp35
tmp38 = -tmp37
tmp39 = triton_helpers.minimum(tmp36, tmp38)
tmp41 = -tmp40
tmp42 = triton_helpers.minimum(tmp39, tmp41)
tmp44 = tmp42 - tmp43
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp34 * tmp45
tmp47 = tmp45 - tmp46
tmp48 = triton_helpers.maximum(tmp3, tmp47)
tmp49 = tmp34 + tmp48
tmp51 = -tmp50
tmp53 = -tmp52
tmp54 = triton_helpers.minimum(tmp51, tmp53)
tmp56 = -tmp55
tmp57 = triton_helpers.minimum(tmp54, tmp56)
tmp59 = tmp57 - tmp58
tmp60 = triton_helpers.maximum(tmp3, tmp59)
tmp61 = tmp49 * tmp60
tmp62 = tmp60 - tmp61
tmp63 = triton_helpers.maximum(tmp3, tmp62)
tmp64 = tmp49 + tmp63
tmp66 = -tmp65
tmp68 = -tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp71 = -tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp74 = tmp72 - tmp73
tmp75 = triton_helpers.maximum(tmp3, tmp74)
tmp76 = tmp64 * tmp75
tmp77 = tmp75 - tmp76
tmp78 = triton_helpers.maximum(tmp3, tmp77)
tmp79 = tmp64 + tmp78
tmp81 = -tmp80
tmp83 = -tmp82
tmp84 = triton_helpers.minimum(tmp81, tmp83)
tmp86 = -tmp85
tmp87 = triton_helpers.minimum(tmp84, tmp86)
tmp89 = tmp87 - tmp88
tmp90 = triton_helpers.maximum(tmp3, tmp89)
tmp91 = tmp79 * tmp90
tmp92 = tmp90 - tmp91
tmp93 = triton_helpers.maximum(tmp3, tmp92)
tmp94 = tmp79 + tmp93
tmp96 = -tmp95
tmp98 = -tmp97
tmp99 = triton_helpers.minimum(tmp96, tmp98)
tmp101 = -tmp100
tmp102 = triton_helpers.minimum(tmp99, tmp101)
tmp104 = tmp102 - tmp103
tmp105 = triton_helpers.maximum(tmp3, tmp104)
tmp106 = tmp94 * tmp105
tmp107 = tmp105 - tmp106
tmp108 = triton_helpers.maximum(tmp3, tmp107)
tmp109 = tmp94 + tmp108
tmp111 = -tmp110
tmp113 = -tmp112
tmp114 = triton_helpers.minimum(tmp111, tmp113)
tmp116 = -tmp115
tmp117 = triton_helpers.minimum(tmp114, tmp116)
tmp119 = tmp117 - tmp118
tmp120 = triton_helpers.maximum(tmp3, tmp119)
tmp121 = tmp109 * tmp120
tmp122 = tmp120 - tmp121
tmp123 = triton_helpers.maximum(tmp3, tmp122)
tmp124 = tmp109 + tmp123
tmp126 = -tmp125
tmp128 = -tmp127
tmp129 = triton_helpers.minimum(tmp126, tmp128)
tmp131 = -tmp130
tmp132 = triton_helpers.minimum(tmp129, tmp131)
tmp134 = tmp132 - tmp133
tmp135 = triton_helpers.maximum(tmp3, tmp134)
tmp136 = tmp124 * tmp135
tmp137 = tmp135 - tmp136
tmp138 = triton_helpers.maximum(tmp3, tmp137)
tmp139 = tmp124 + tmp138
tmp141 = -tmp140
tmp143 = -tmp142
tmp144 = triton_helpers.minimum(tmp141, tmp143)
tmp146 = -tmp145
tmp147 = triton_helpers.minimum(tmp144, tmp146)
tmp149 = tmp147 - tmp148
tmp150 = triton_helpers.maximum(tmp3, tmp149)
tmp151 = tmp139 * tmp150
tmp152 = tmp150 - tmp151
tmp153 = triton_helpers.maximum(tmp3, tmp152)
tmp154 = tmp139 + tmp153
tmp156 = -tmp155
tmp158 = -tmp157
tmp159 = triton_helpers.minimum(tmp156, tmp158)
tmp161 = -tmp160
tmp162 = triton_helpers.minimum(tmp159, tmp161)
tmp164 = tmp162 - tmp163
tmp165 = triton_helpers.maximum(tmp3, tmp164)
tmp166 = tmp154 * tmp165
tmp167 = tmp165 - tmp166
tmp168 = triton_helpers.maximum(tmp3, tmp167)
tmp169 = tmp154 + tmp168
tmp171 = -tmp170
tmp173 = -tmp172
tmp174 = triton_helpers.minimum(tmp171, tmp173)
tmp176 = -tmp175
tmp177 = triton_helpers.minimum(tmp174, tmp176)
tmp179 = tmp177 - tmp178
tmp180 = triton_helpers.maximum(tmp3, tmp179)
tmp181 = tmp169 * tmp180
tmp182 = tmp180 - tmp181
tmp183 = triton_helpers.maximum(tmp3, tmp182)
tmp184 = tmp169 + tmp183
tmp186 = -tmp185
tmp188 = -tmp187
tmp189 = triton_helpers.minimum(tmp186, tmp188)
tmp191 = -tmp190
tmp192 = triton_helpers.minimum(tmp189, tmp191)
tmp194 = tmp192 - tmp193
tmp195 = triton_helpers.maximum(tmp3, tmp194)
tmp196 = tmp184 * tmp195
tmp197 = tmp195 - tmp196
tmp198 = triton_helpers.maximum(tmp3, tmp197)
tmp199 = tmp184 + tmp198
tmp201 = -tmp200
tmp203 = -tmp202
tmp204 = triton_helpers.minimum(tmp201, tmp203)
tmp206 = -tmp205
tmp207 = triton_helpers.minimum(tmp204, tmp206)
tmp209 = tmp207 - tmp208
tmp210 = triton_helpers.maximum(tmp3, tmp209)
tmp211 = tmp199 * tmp210
tmp212 = tmp210 - tmp211
tmp213 = triton_helpers.maximum(tmp3, tmp212)
tmp214 = tmp199 + tmp213
tmp216 = -tmp215
tmp218 = -tmp217
tmp219 = triton_helpers.minimum(tmp216, tmp218)
tmp221 = -tmp220
tmp222 = triton_helpers.minimum(tmp219, tmp221)
tmp224 = tmp222 - tmp223
tmp225 = triton_helpers.maximum(tmp3, tmp224)
tmp226 = tmp214 * tmp225
tmp227 = tmp225 - tmp226
tmp228 = triton_helpers.maximum(tmp3, tmp227)
tmp229 = tmp214 + tmp228
tmp231 = -tmp230
tmp233 = -tmp232
tmp234 = triton_helpers.minimum(tmp231, tmp233)
tmp236 = -tmp235
tmp237 = triton_helpers.minimum(tmp234, tmp236)
tmp239 = tmp237 - tmp238
tmp240 = triton_helpers.maximum(tmp3, tmp239)
tmp241 = tmp229 * tmp240
tmp242 = tmp240 - tmp241
tmp243 = triton_helpers.maximum(tmp3, tmp242)
tmp244 = tmp229 + tmp243
tmp246 = -tmp245
tmp248 = -tmp247
tmp249 = triton_helpers.minimum(tmp246, tmp248)
tmp251 = -tmp250
tmp252 = triton_helpers.minimum(tmp249, tmp251)
tmp254 = tmp252 - tmp253
tmp255 = triton_helpers.maximum(tmp3, tmp254)
tmp256 = tmp244 * tmp255
tmp257 = tmp255 - tmp256
tmp258 = triton_helpers.maximum(tmp3, tmp257)
tmp259 = tmp244 + tmp258
tmp261 = -tmp260
tmp263 = -tmp262
tmp264 = triton_helpers.minimum(tmp261, tmp263)
tmp266 = -tmp265
tmp267 = triton_helpers.minimum(tmp264, tmp266)
tmp269 = tmp267 - tmp268
tmp270 = triton_helpers.maximum(tmp3, tmp269)
tmp271 = tmp259 * tmp270
tmp272 = tmp270 - tmp271
tmp273 = triton_helpers.maximum(tmp3, tmp272)
tmp274 = tmp259 + tmp273
tmp276 = -tmp275
tmp278 = -tmp277
tmp279 = triton_helpers.minimum(tmp276, tmp278)
tmp281 = -tmp280
tmp282 = triton_helpers.minimum(tmp279, tmp281)
tmp284 = tmp282 - tmp283
tmp285 = triton_helpers.maximum(tmp3, tmp284)
tmp286 = tmp274 * tmp285
tmp287 = tmp285 - tmp286
tmp288 = triton_helpers.maximum(tmp3, tmp287)
tmp289 = tmp274 + tmp288
tmp291 = -tmp290
tmp293 = -tmp292
tmp294 = triton_helpers.minimum(tmp291, tmp293)
tmp296 = -tmp295
tmp297 = triton_helpers.minimum(tmp294, tmp296)
tmp299 = tmp297 - tmp298
tmp300 = triton_helpers.maximum(tmp3, tmp299)
tmp301 = tmp289 * tmp300
tmp302 = tmp300 - tmp301
tmp303 = triton_helpers.maximum(tmp3, tmp302)
tmp304 = tmp289 + tmp303
tmp306 = -tmp305
tmp308 = -tmp307
tmp309 = triton_helpers.minimum(tmp306, tmp308)
tmp311 = -tmp310
tmp312 = triton_helpers.minimum(tmp309, tmp311)
tmp314 = tmp312 - tmp313
tmp315 = triton_helpers.maximum(tmp3, tmp314)
tmp316 = tmp304 * tmp315
tmp317 = tmp315 - tmp316
tmp318 = triton_helpers.maximum(tmp3, tmp317)
tmp319 = tmp304 + tmp318
tmp321 = -tmp320
tmp323 = -tmp322
tmp324 = triton_helpers.minimum(tmp321, tmp323)
tmp326 = -tmp325
tmp327 = triton_helpers.minimum(tmp324, tmp326)
tmp329 = tmp327 - tmp328
tmp330 = triton_helpers.maximum(tmp3, tmp329)
tmp331 = tmp319 * tmp330
tmp332 = tmp330 - tmp331
tmp333 = triton_helpers.maximum(tmp3, tmp332)
tmp334 = tmp319 + tmp333
tmp336 = -tmp335
tmp338 = -tmp337
tmp339 = triton_helpers.minimum(tmp336, tmp338)
tmp341 = -tmp340
tmp342 = triton_helpers.minimum(tmp339, tmp341)
tmp344 = tmp342 - tmp343
tmp345 = triton_helpers.maximum(tmp3, tmp344)
tmp346 = tmp334 * tmp345
tmp347 = tmp345 - tmp346
tmp348 = triton_helpers.maximum(tmp3, tmp347)
tmp349 = tmp334 + tmp348
tmp351 = -tmp350
tmp353 = -tmp352
tmp354 = triton_helpers.minimum(tmp351, tmp353)
tmp356 = -tmp355
tmp357 = triton_helpers.minimum(tmp354, tmp356)
tmp359 = tmp357 - tmp358
tmp360 = triton_helpers.maximum(tmp3, tmp359)
tmp361 = tmp349 * tmp360
tmp362 = tmp360 - tmp361
tmp363 = triton_helpers.maximum(tmp3, tmp362)
tmp364 = tmp349 + tmp363
tmp366 = -tmp365
tmp368 = -tmp367
tmp369 = triton_helpers.minimum(tmp366, tmp368)
tmp371 = -tmp370
tmp372 = triton_helpers.minimum(tmp369, tmp371)
tmp374 = tmp372 - tmp373
tmp375 = triton_helpers.maximum(tmp3, tmp374)
tmp376 = tmp364 * tmp375
tmp377 = tmp375 - tmp376
tmp378 = triton_helpers.maximum(tmp3, tmp377)
tmp379 = tmp364 + tmp378
tmp381 = -tmp380
tmp383 = -tmp382
tmp384 = triton_helpers.minimum(tmp381, tmp383)
tmp386 = -tmp385
tmp387 = triton_helpers.minimum(tmp384, tmp386)
tmp389 = tmp387 - tmp388
tmp390 = triton_helpers.maximum(tmp3, tmp389)
tmp391 = tmp379 * tmp390
tmp392 = tmp390 - tmp391
tmp393 = triton_helpers.maximum(tmp3, tmp392)
tmp394 = tmp379 + tmp393
tmp396 = -tmp395
tmp398 = -tmp397
tmp399 = triton_helpers.minimum(tmp396, tmp398)
tmp401 = -tmp400
tmp402 = triton_helpers.minimum(tmp399, tmp401)
tmp404 = tmp402 - tmp403
tmp405 = triton_helpers.maximum(tmp3, tmp404)
tmp406 = tmp394 * tmp405
tmp407 = tmp405 - tmp406
tmp408 = triton_helpers.maximum(tmp3, tmp407)
tmp409 = tmp394 + tmp408
tmp411 = -tmp410
tmp413 = -tmp412
tmp414 = triton_helpers.minimum(tmp411, tmp413)
tmp416 = -tmp415
tmp417 = triton_helpers.minimum(tmp414, tmp416)
tmp419 = tmp417 - tmp418
tmp420 = triton_helpers.maximum(tmp3, tmp419)
tmp421 = tmp409 * tmp420
tmp422 = tmp420 - tmp421
tmp423 = triton_helpers.maximum(tmp3, tmp422)
tmp424 = tmp409 + tmp423
tmp426 = -tmp425
tmp428 = -tmp427
tmp429 = triton_helpers.minimum(tmp426, tmp428)
tmp431 = -tmp430
tmp432 = triton_helpers.minimum(tmp429, tmp431)
tmp434 = tmp432 - tmp433
tmp435 = triton_helpers.maximum(tmp3, tmp434)
tmp436 = tmp424 * tmp435
tmp437 = tmp435 - tmp436
tmp438 = triton_helpers.maximum(tmp3, tmp437)
tmp439 = tmp424 + tmp438
tmp441 = -tmp440
tmp443 = -tmp442
tmp444 = triton_helpers.minimum(tmp441, tmp443)
tmp446 = -tmp445
tmp447 = triton_helpers.minimum(tmp444, tmp446)
tmp449 = tmp447 - tmp448
tmp450 = triton_helpers.maximum(tmp3, tmp449)
tmp451 = tmp439 * tmp450
tmp452 = tmp450 - tmp451
tmp453 = triton_helpers.maximum(tmp3, tmp452)
tmp454 = tmp439 + tmp453
tmp456 = -tmp455
tmp458 = -tmp457
tmp459 = triton_helpers.minimum(tmp456, tmp458)
tmp461 = -tmp460
tmp462 = triton_helpers.minimum(tmp459, tmp461)
tmp464 = tmp462 - tmp463
tmp465 = triton_helpers.maximum(tmp3, tmp464)
tmp466 = tmp454 * tmp465
tmp467 = tmp465 - tmp466
tmp468 = triton_helpers.maximum(tmp3, tmp467)
tmp469 = tmp454 + tmp468
tmp471 = -tmp470
tmp473 = -tmp472
tmp474 = triton_helpers.minimum(tmp471, tmp473)
tmp476 = -tmp475
tmp477 = triton_helpers.minimum(tmp474, tmp476)
tmp479 = tmp477 - tmp478
tmp480 = triton_helpers.maximum(tmp3, tmp479)
tmp481 = tmp469 * tmp480
tmp482 = tmp480 - tmp481
tmp483 = triton_helpers.maximum(tmp3, tmp482)
tmp484 = tmp469 + tmp483
tmp486 = -tmp485
tmp488 = -tmp487
tmp489 = triton_helpers.minimum(tmp486, tmp488)
tmp491 = -tmp490
tmp492 = triton_helpers.minimum(tmp489, tmp491)
tmp494 = tmp492 - tmp493
tmp495 = triton_helpers.maximum(tmp3, tmp494)
tmp496 = tmp484 * tmp495
tmp497 = tmp495 - tmp496
tmp498 = triton_helpers.maximum(tmp3, tmp497)
tmp499 = tmp484 + tmp498
tmp501 = -tmp500
tmp503 = -tmp502
tmp504 = triton_helpers.minimum(tmp501, tmp503)
tmp506 = -tmp505
tmp507 = triton_helpers.minimum(tmp504, tmp506)
tmp509 = tmp507 - tmp508
tmp510 = triton_helpers.maximum(tmp3, tmp509)
tmp511 = tmp499 * tmp510
tmp512 = tmp510 - tmp511
tmp513 = triton_helpers.maximum(tmp3, tmp512)
tmp514 = tmp499 + tmp513
tmp516 = -tmp515
tmp518 = -tmp517
tmp519 = triton_helpers.minimum(tmp516, tmp518)
tmp521 = -tmp520
tmp522 = triton_helpers.minimum(tmp519, tmp521)
tmp524 = tmp522 - tmp523
tmp525 = triton_helpers.maximum(tmp3, tmp524)
tmp526 = tmp514 * tmp525
tmp527 = tmp525 - tmp526
tmp528 = triton_helpers.maximum(tmp3, tmp527)
tmp529 = tmp514 + tmp528
tmp531 = -tmp530
tmp533 = -tmp532
tmp534 = triton_helpers.minimum(tmp531, tmp533)
tmp536 = -tmp535
tmp537 = triton_helpers.minimum(tmp534, tmp536)
tmp539 = tmp537 - tmp538
tmp540 = triton_helpers.maximum(tmp3, tmp539)
tmp541 = tmp529 * tmp540
tmp542 = tmp540 - tmp541
tmp543 = triton_helpers.maximum(tmp3, tmp542)
tmp544 = tmp529 + tmp543
tmp546 = -tmp545
tmp548 = -tmp547
tmp549 = triton_helpers.minimum(tmp546, tmp548)
tmp551 = -tmp550
tmp552 = triton_helpers.minimum(tmp549, tmp551)
tmp554 = tmp552 - tmp553
tmp555 = triton_helpers.maximum(tmp3, tmp554)
tmp556 = tmp544 * tmp555
tmp557 = tmp555 - tmp556
tmp558 = triton_helpers.maximum(tmp3, tmp557)
tmp559 = tmp544 + tmp558
tmp561 = -tmp560
tmp563 = -tmp562
tmp564 = triton_helpers.minimum(tmp561, tmp563)
tmp566 = -tmp565
tmp567 = triton_helpers.minimum(tmp564, tmp566)
tmp569 = tmp567 - tmp568
tmp570 = triton_helpers.maximum(tmp3, tmp569)
tmp571 = tmp559 * tmp570
tmp572 = tmp570 - tmp571
tmp573 = triton_helpers.maximum(tmp3, tmp572)
tmp574 = tmp559 + tmp573
tmp576 = -tmp575
tmp578 = -tmp577
tmp579 = triton_helpers.minimum(tmp576, tmp578)
tmp581 = -tmp580
tmp582 = triton_helpers.minimum(tmp579, tmp581)
tmp584 = tmp582 - tmp583
tmp585 = triton_helpers.maximum(tmp3, tmp584)
tmp586 = tmp574 * tmp585
tmp587 = tmp585 - tmp586
tmp588 = triton_helpers.maximum(tmp3, tmp587)
tmp589 = tmp574 + tmp588
tmp591 = -tmp590
tmp593 = -tmp592
tmp594 = triton_helpers.minimum(tmp591, tmp593)
tmp596 = -tmp595
tmp597 = triton_helpers.minimum(tmp594, tmp596)
tmp599 = tmp597 - tmp598
tmp600 = triton_helpers.maximum(tmp3, tmp599)
tmp601 = tmp589 * tmp600
tmp602 = tmp600 - tmp601
tmp603 = triton_helpers.maximum(tmp3, tmp602)
tmp604 = tmp589 + tmp603
tmp606 = -tmp605
tmp608 = -tmp607
tmp609 = triton_helpers.minimum(tmp606, tmp608)
tmp611 = -tmp610
tmp612 = triton_helpers.minimum(tmp609, tmp611)
tmp614 = tmp612 - tmp613
tmp615 = triton_helpers.maximum(tmp3, tmp614)
tmp616 = tmp604 * tmp615
tmp617 = tmp615 - tmp616
tmp618 = triton_helpers.maximum(tmp3, tmp617)
tmp619 = tmp604 + tmp618
tmp621 = -tmp620
tmp623 = -tmp622
tmp624 = triton_helpers.minimum(tmp621, tmp623)
tmp626 = -tmp625
tmp627 = triton_helpers.minimum(tmp624, tmp626)
tmp629 = tmp627 - tmp628
tmp630 = triton_helpers.maximum(tmp3, tmp629)
tmp631 = tmp619 * tmp630
tmp632 = tmp630 - tmp631
tmp633 = triton_helpers.maximum(tmp3, tmp632)
tmp634 = tmp619 + tmp633
tmp636 = -tmp635
tmp638 = -tmp637
tmp639 = triton_helpers.minimum(tmp636, tmp638)
tmp641 = -tmp640
tmp642 = triton_helpers.minimum(tmp639, tmp641)
tmp644 = tmp642 - tmp643
tmp645 = triton_helpers.maximum(tmp3, tmp644)
tmp646 = tmp634 * tmp645
tmp647 = tmp645 - tmp646
tmp648 = triton_helpers.maximum(tmp3, tmp647)
tmp649 = tmp634 + tmp648
tmp651 = -tmp650
tmp653 = -tmp652
tmp654 = triton_helpers.minimum(tmp651, tmp653)
tmp656 = -tmp655
tmp657 = triton_helpers.minimum(tmp654, tmp656)
tmp659 = tmp657 - tmp658
tmp660 = triton_helpers.maximum(tmp3, tmp659)
tmp661 = tmp649 * tmp660
tmp662 = tmp660 - tmp661
tmp663 = triton_helpers.maximum(tmp3, tmp662)
tmp664 = tmp649 + tmp663
tmp666 = -tmp665
tmp668 = -tmp667
tmp669 = triton_helpers.minimum(tmp666, tmp668)
tmp671 = -tmp670
tmp672 = triton_helpers.minimum(tmp669, tmp671)
tmp674 = tmp672 - tmp673
tmp675 = triton_helpers.maximum(tmp3, tmp674)
tmp676 = tmp664 * tmp675
tmp677 = tmp675 - tmp676
tmp678 = triton_helpers.maximum(tmp3, tmp677)
tmp679 = tmp664 + tmp678
tmp681 = -tmp680
tmp683 = -tmp682
tmp684 = triton_helpers.minimum(tmp681, tmp683)
tmp686 = -tmp685
tmp687 = triton_helpers.minimum(tmp684, tmp686)
tmp689 = tmp687 - tmp688
tmp690 = triton_helpers.maximum(tmp3, tmp689)
tmp691 = tmp679 * tmp690
tmp692 = tmp690 - tmp691
tmp693 = triton_helpers.maximum(tmp3, tmp692)
tmp694 = tmp679 + tmp693
tmp696 = -tmp695
tmp698 = -tmp697
tmp699 = triton_helpers.minimum(tmp696, tmp698)
tmp701 = -tmp700
tmp702 = triton_helpers.minimum(tmp699, tmp701)
tmp704 = tmp702 - tmp703
tmp705 = triton_helpers.maximum(tmp3, tmp704)
tmp706 = tmp694 * tmp705
tmp707 = tmp705 - tmp706
tmp708 = triton_helpers.maximum(tmp3, tmp707)
tmp709 = tmp694 + tmp708
tmp711 = -tmp710
tmp713 = -tmp712
tmp714 = triton_helpers.minimum(tmp711, tmp713)
tmp716 = -tmp715
tmp717 = triton_helpers.minimum(tmp714, tmp716)
tmp719 = tmp717 - tmp718
tmp720 = triton_helpers.maximum(tmp3, tmp719)
tmp721 = tmp709 * tmp720
tmp722 = tmp720 - tmp721
tmp723 = triton_helpers.maximum(tmp3, tmp722)
tmp724 = tmp709 + tmp723
tmp726 = -tmp725
tmp728 = -tmp727
tmp729 = triton_helpers.minimum(tmp726, tmp728)
tmp731 = -tmp730
tmp732 = triton_helpers.minimum(tmp729, tmp731)
tmp734 = tmp732 - tmp733
tmp735 = triton_helpers.maximum(tmp3, tmp734)
tmp736 = tmp724 * tmp735
tmp737 = tmp735 - tmp736
tmp738 = triton_helpers.maximum(tmp3, tmp737)
tmp739 = tmp724 + tmp738
tmp741 = -tmp740
tmp743 = -tmp742
tmp744 = triton_helpers.minimum(tmp741, tmp743)
tmp746 = -tmp745
tmp747 = triton_helpers.minimum(tmp744, tmp746)
tmp749 = tmp747 - tmp748
tmp750 = triton_helpers.maximum(tmp3, tmp749)
tmp751 = tmp739 * tmp750
tmp752 = tmp750 - tmp751
tmp753 = triton_helpers.maximum(tmp3, tmp752)
tmp754 = tmp739 + tmp753
tl.store(in_out_ptr22 + x0, tmp754, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_neg_0[grid(256)](arg0_1, buf0, buf4, buf8, buf16,
buf20, buf24, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = torch.ops.aten.max_pool3d_with_indices.default(buf0, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf0
buf2 = buf1[0]
del buf1
buf5 = torch.ops.aten.max_pool3d_with_indices.default(buf4, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf4
buf6 = buf5[0]
del buf5
buf9 = torch.ops.aten.max_pool3d_with_indices.default(buf8, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf8
buf10 = buf9[0]
del buf9
buf12 = buf10
del buf10
triton_poi_fused_minimum_neg_1[grid(256)](buf12, buf2, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf13 = torch.ops.aten.max_pool3d_with_indices.default(buf12, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf14 = buf13[0]
del buf13
buf17 = torch.ops.aten.max_pool3d_with_indices.default(buf16, [3, 1,
1], [1, 1, 1], [1, 0, 0])
buf18 = buf17[0]
del buf17
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [1, 3,
1], [1, 1, 1], [0, 1, 0])
buf22 = buf21[0]
del buf21
buf25 = torch.ops.aten.max_pool3d_with_indices.default(buf24, [1, 1,
3], [1, 1, 1], [0, 0, 1])
buf26 = buf25[0]
del buf25
buf28 = buf24
del buf24
buf32 = buf20
del buf20
buf36 = buf16
del buf16
buf44 = buf12
del buf12
buf48 = buf6
del buf6
buf52 = buf2
del buf2
triton_poi_fused_minimum_neg_2[grid(256)](buf18, buf26, buf22,
buf28, buf32, buf36, buf44, buf48, buf52, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf29 = torch.ops.aten.max_pool3d_with_indices.default(buf28, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf28
buf30 = buf29[0]
del buf29
buf33 = torch.ops.aten.max_pool3d_with_indices.default(buf32, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf32
buf34 = buf33[0]
del buf33
buf37 = torch.ops.aten.max_pool3d_with_indices.default(buf36, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf36
buf38 = buf37[0]
del buf37
buf40 = buf30
del buf30
triton_poi_fused_minimum_neg_3[grid(256)](buf40, buf38, buf34, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf41 = torch.ops.aten.max_pool3d_with_indices.default(buf40, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf42 = buf41[0]
del buf41
buf45 = torch.ops.aten.max_pool3d_with_indices.default(buf44, [3, 1,
1], [1, 1, 1], [1, 0, 0])
buf46 = buf45[0]
del buf45
buf49 = torch.ops.aten.max_pool3d_with_indices.default(buf48, [1, 3,
1], [1, 1, 1], [0, 1, 0])
buf50 = buf49[0]
del buf49
buf53 = torch.ops.aten.max_pool3d_with_indices.default(buf52, [1, 1,
3], [1, 1, 1], [0, 0, 1])
buf54 = buf53[0]
del buf53
buf56 = buf52
del buf52
buf60 = buf48
del buf48
buf64 = buf44
del buf44
buf72 = buf40
del buf40
buf76 = buf38
del buf38
buf80 = buf34
del buf34
triton_poi_fused_minimum_neg_2[grid(256)](buf46, buf54, buf50,
buf56, buf60, buf64, buf72, buf76, buf80, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf57 = torch.ops.aten.max_pool3d_with_indices.default(buf56, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf56
buf58 = buf57[0]
del buf57
buf61 = torch.ops.aten.max_pool3d_with_indices.default(buf60, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf60
buf62 = buf61[0]
del buf61
buf65 = torch.ops.aten.max_pool3d_with_indices.default(buf64, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf64
buf66 = buf65[0]
del buf65
buf68 = buf58
del buf58
triton_poi_fused_minimum_neg_3[grid(256)](buf68, buf66, buf62, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf69 = torch.ops.aten.max_pool3d_with_indices.default(buf68, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf70 = buf69[0]
del buf69
buf73 = torch.ops.aten.max_pool3d_with_indices.default(buf72, [3, 1,
1], [1, 1, 1], [1, 0, 0])
buf74 = buf73[0]
del buf73
buf77 = torch.ops.aten.max_pool3d_with_indices.default(buf76, [1, 3,
1], [1, 1, 1], [0, 1, 0])
buf78 = buf77[0]
del buf77
buf81 = torch.ops.aten.max_pool3d_with_indices.default(buf80, [1, 1,
3], [1, 1, 1], [0, 0, 1])
buf82 = buf81[0]
del buf81
buf84 = buf80
del buf80
buf88 = buf76
del buf76
buf92 = buf72
del buf72
buf100 = buf68
del buf68
buf104 = buf66
del buf66
buf108 = buf62
del buf62
triton_poi_fused_minimum_neg_2[grid(256)](buf74, buf82, buf78,
buf84, buf88, buf92, buf100, buf104, buf108, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf85 = torch.ops.aten.max_pool3d_with_indices.default(buf84, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf84
buf86 = buf85[0]
del buf85
buf89 = torch.ops.aten.max_pool3d_with_indices.default(buf88, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf88
buf90 = buf89[0]
del buf89
buf93 = torch.ops.aten.max_pool3d_with_indices.default(buf92, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf92
buf94 = buf93[0]
del buf93
buf96 = buf86
del buf86
triton_poi_fused_minimum_neg_3[grid(256)](buf96, buf94, buf90, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf97 = torch.ops.aten.max_pool3d_with_indices.default(buf96, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf98 = buf97[0]
del buf97
buf101 = torch.ops.aten.max_pool3d_with_indices.default(buf100, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf102 = buf101[0]
del buf101
buf105 = torch.ops.aten.max_pool3d_with_indices.default(buf104, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf106 = buf105[0]
del buf105
buf109 = torch.ops.aten.max_pool3d_with_indices.default(buf108, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf110 = buf109[0]
del buf109
buf112 = buf108
del buf108
buf116 = buf104
del buf104
buf120 = buf100
del buf100
buf128 = buf96
del buf96
buf132 = buf94
del buf94
buf136 = buf90
del buf90
triton_poi_fused_minimum_neg_2[grid(256)](buf102, buf110, buf106,
buf112, buf116, buf120, buf128, buf132, buf136, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf113 = torch.ops.aten.max_pool3d_with_indices.default(buf112, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf112
buf114 = buf113[0]
del buf113
buf117 = torch.ops.aten.max_pool3d_with_indices.default(buf116, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf116
buf118 = buf117[0]
del buf117
buf121 = torch.ops.aten.max_pool3d_with_indices.default(buf120, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf120
buf122 = buf121[0]
del buf121
buf124 = buf114
del buf114
triton_poi_fused_minimum_neg_3[grid(256)](buf124, buf122, buf118,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf125 = torch.ops.aten.max_pool3d_with_indices.default(buf124, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf126 = buf125[0]
del buf125
buf129 = torch.ops.aten.max_pool3d_with_indices.default(buf128, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf130 = buf129[0]
del buf129
buf133 = torch.ops.aten.max_pool3d_with_indices.default(buf132, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf134 = buf133[0]
del buf133
buf137 = torch.ops.aten.max_pool3d_with_indices.default(buf136, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf138 = buf137[0]
del buf137
buf140 = buf136
del buf136
buf144 = buf132
del buf132
buf148 = buf128
del buf128
buf156 = buf124
del buf124
buf160 = buf122
del buf122
buf164 = buf118
del buf118
triton_poi_fused_minimum_neg_2[grid(256)](buf130, buf138, buf134,
buf140, buf144, buf148, buf156, buf160, buf164, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf141 = torch.ops.aten.max_pool3d_with_indices.default(buf140, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf140
buf142 = buf141[0]
del buf141
buf145 = torch.ops.aten.max_pool3d_with_indices.default(buf144, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf144
buf146 = buf145[0]
del buf145
buf149 = torch.ops.aten.max_pool3d_with_indices.default(buf148, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf148
buf150 = buf149[0]
del buf149
buf152 = buf142
del buf142
triton_poi_fused_minimum_neg_3[grid(256)](buf152, buf150, buf146,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf153 = torch.ops.aten.max_pool3d_with_indices.default(buf152, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf154 = buf153[0]
del buf153
buf157 = torch.ops.aten.max_pool3d_with_indices.default(buf156, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf158 = buf157[0]
del buf157
buf161 = torch.ops.aten.max_pool3d_with_indices.default(buf160, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf162 = buf161[0]
del buf161
buf165 = torch.ops.aten.max_pool3d_with_indices.default(buf164, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf166 = buf165[0]
del buf165
buf168 = buf164
del buf164
buf172 = buf160
del buf160
buf176 = buf156
del buf156
buf184 = buf152
del buf152
buf188 = buf150
del buf150
buf192 = buf146
del buf146
triton_poi_fused_minimum_neg_2[grid(256)](buf158, buf166, buf162,
buf168, buf172, buf176, buf184, buf188, buf192, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf169 = torch.ops.aten.max_pool3d_with_indices.default(buf168, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf168
buf170 = buf169[0]
del buf169
buf173 = torch.ops.aten.max_pool3d_with_indices.default(buf172, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf172
buf174 = buf173[0]
del buf173
buf177 = torch.ops.aten.max_pool3d_with_indices.default(buf176, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf176
buf178 = buf177[0]
del buf177
buf180 = buf170
del buf170
triton_poi_fused_minimum_neg_3[grid(256)](buf180, buf178, buf174,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf181 = torch.ops.aten.max_pool3d_with_indices.default(buf180, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf182 = buf181[0]
del buf181
buf185 = torch.ops.aten.max_pool3d_with_indices.default(buf184, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf186 = buf185[0]
del buf185
buf189 = torch.ops.aten.max_pool3d_with_indices.default(buf188, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf190 = buf189[0]
del buf189
buf193 = torch.ops.aten.max_pool3d_with_indices.default(buf192, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf194 = buf193[0]
del buf193
buf196 = buf192
del buf192
buf200 = buf188
del buf188
buf204 = buf184
del buf184
buf212 = buf180
del buf180
buf216 = buf178
del buf178
buf220 = buf174
del buf174
triton_poi_fused_minimum_neg_2[grid(256)](buf186, buf194, buf190,
buf196, buf200, buf204, buf212, buf216, buf220, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf197 = torch.ops.aten.max_pool3d_with_indices.default(buf196, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf196
buf198 = buf197[0]
del buf197
buf201 = torch.ops.aten.max_pool3d_with_indices.default(buf200, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf200
buf202 = buf201[0]
del buf201
buf205 = torch.ops.aten.max_pool3d_with_indices.default(buf204, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf204
buf206 = buf205[0]
del buf205
buf208 = buf198
del buf198
triton_poi_fused_minimum_neg_3[grid(256)](buf208, buf206, buf202,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf209 = torch.ops.aten.max_pool3d_with_indices.default(buf208, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf210 = buf209[0]
del buf209
buf213 = torch.ops.aten.max_pool3d_with_indices.default(buf212, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf214 = buf213[0]
del buf213
buf217 = torch.ops.aten.max_pool3d_with_indices.default(buf216, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf218 = buf217[0]
del buf217
buf221 = torch.ops.aten.max_pool3d_with_indices.default(buf220, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf222 = buf221[0]
del buf221
buf224 = buf220
del buf220
buf228 = buf216
del buf216
buf232 = buf212
del buf212
buf240 = buf208
del buf208
buf244 = buf206
del buf206
buf248 = buf202
del buf202
triton_poi_fused_minimum_neg_2[grid(256)](buf214, buf222, buf218,
buf224, buf228, buf232, buf240, buf244, buf248, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf225 = torch.ops.aten.max_pool3d_with_indices.default(buf224, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf224
buf226 = buf225[0]
del buf225
buf229 = torch.ops.aten.max_pool3d_with_indices.default(buf228, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf228
buf230 = buf229[0]
del buf229
buf233 = torch.ops.aten.max_pool3d_with_indices.default(buf232, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf232
buf234 = buf233[0]
del buf233
buf236 = buf226
del buf226
triton_poi_fused_minimum_neg_3[grid(256)](buf236, buf234, buf230,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf237 = torch.ops.aten.max_pool3d_with_indices.default(buf236, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf238 = buf237[0]
del buf237
buf241 = torch.ops.aten.max_pool3d_with_indices.default(buf240, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf242 = buf241[0]
del buf241
buf245 = torch.ops.aten.max_pool3d_with_indices.default(buf244, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf246 = buf245[0]
del buf245
buf249 = torch.ops.aten.max_pool3d_with_indices.default(buf248, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf250 = buf249[0]
del buf249
buf252 = buf248
del buf248
buf256 = buf244
del buf244
buf260 = buf240
del buf240
buf268 = buf236
del buf236
buf272 = buf234
del buf234
buf276 = buf230
del buf230
triton_poi_fused_minimum_neg_2[grid(256)](buf242, buf250, buf246,
buf252, buf256, buf260, buf268, buf272, buf276, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf253 = torch.ops.aten.max_pool3d_with_indices.default(buf252, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf252
buf254 = buf253[0]
del buf253
buf257 = torch.ops.aten.max_pool3d_with_indices.default(buf256, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf256
buf258 = buf257[0]
del buf257
buf261 = torch.ops.aten.max_pool3d_with_indices.default(buf260, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf260
buf262 = buf261[0]
del buf261
buf264 = buf254
del buf254
triton_poi_fused_minimum_neg_3[grid(256)](buf264, buf262, buf258,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf265 = torch.ops.aten.max_pool3d_with_indices.default(buf264, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf266 = buf265[0]
del buf265
buf269 = torch.ops.aten.max_pool3d_with_indices.default(buf268, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf270 = buf269[0]
del buf269
buf273 = torch.ops.aten.max_pool3d_with_indices.default(buf272, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf274 = buf273[0]
del buf273
buf277 = torch.ops.aten.max_pool3d_with_indices.default(buf276, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf278 = buf277[0]
del buf277
buf280 = buf276
del buf276
buf284 = buf272
del buf272
buf288 = buf268
del buf268
buf296 = buf264
del buf264
buf300 = buf262
del buf262
buf304 = buf258
del buf258
triton_poi_fused_minimum_neg_2[grid(256)](buf270, buf278, buf274,
buf280, buf284, buf288, buf296, buf300, buf304, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf281 = torch.ops.aten.max_pool3d_with_indices.default(buf280, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf280
buf282 = buf281[0]
del buf281
buf285 = torch.ops.aten.max_pool3d_with_indices.default(buf284, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf284
buf286 = buf285[0]
del buf285
buf289 = torch.ops.aten.max_pool3d_with_indices.default(buf288, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf288
buf290 = buf289[0]
del buf289
buf292 = buf282
del buf282
triton_poi_fused_minimum_neg_3[grid(256)](buf292, buf290, buf286,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf293 = torch.ops.aten.max_pool3d_with_indices.default(buf292, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf294 = buf293[0]
del buf293
buf297 = torch.ops.aten.max_pool3d_with_indices.default(buf296, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf298 = buf297[0]
del buf297
buf301 = torch.ops.aten.max_pool3d_with_indices.default(buf300, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf302 = buf301[0]
del buf301
buf305 = torch.ops.aten.max_pool3d_with_indices.default(buf304, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf306 = buf305[0]
del buf305
buf308 = buf304
del buf304
buf312 = buf300
del buf300
buf316 = buf296
del buf296
buf324 = buf292
del buf292
buf328 = buf290
del buf290
buf332 = buf286
del buf286
triton_poi_fused_minimum_neg_2[grid(256)](buf298, buf306, buf302,
buf308, buf312, buf316, buf324, buf328, buf332, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf309 = torch.ops.aten.max_pool3d_with_indices.default(buf308, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf308
buf310 = buf309[0]
del buf309
buf313 = torch.ops.aten.max_pool3d_with_indices.default(buf312, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf312
buf314 = buf313[0]
del buf313
buf317 = torch.ops.aten.max_pool3d_with_indices.default(buf316, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf316
buf318 = buf317[0]
del buf317
buf320 = buf310
del buf310
triton_poi_fused_minimum_neg_3[grid(256)](buf320, buf318, buf314,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf321 = torch.ops.aten.max_pool3d_with_indices.default(buf320, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf322 = buf321[0]
del buf321
buf325 = torch.ops.aten.max_pool3d_with_indices.default(buf324, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf326 = buf325[0]
del buf325
buf329 = torch.ops.aten.max_pool3d_with_indices.default(buf328, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf330 = buf329[0]
del buf329
buf333 = torch.ops.aten.max_pool3d_with_indices.default(buf332, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf334 = buf333[0]
del buf333
buf336 = buf332
del buf332
buf340 = buf328
del buf328
buf344 = buf324
del buf324
buf352 = buf320
del buf320
buf356 = buf318
del buf318
buf360 = buf314
del buf314
triton_poi_fused_minimum_neg_2[grid(256)](buf326, buf334, buf330,
buf336, buf340, buf344, buf352, buf356, buf360, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf337 = torch.ops.aten.max_pool3d_with_indices.default(buf336, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf336
buf338 = buf337[0]
del buf337
buf341 = torch.ops.aten.max_pool3d_with_indices.default(buf340, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf340
buf342 = buf341[0]
del buf341
buf345 = torch.ops.aten.max_pool3d_with_indices.default(buf344, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf344
buf346 = buf345[0]
del buf345
buf348 = buf338
del buf338
triton_poi_fused_minimum_neg_3[grid(256)](buf348, buf346, buf342,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf349 = torch.ops.aten.max_pool3d_with_indices.default(buf348, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf350 = buf349[0]
del buf349
buf353 = torch.ops.aten.max_pool3d_with_indices.default(buf352, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf354 = buf353[0]
del buf353
buf357 = torch.ops.aten.max_pool3d_with_indices.default(buf356, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf358 = buf357[0]
del buf357
buf361 = torch.ops.aten.max_pool3d_with_indices.default(buf360, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf362 = buf361[0]
del buf361
buf364 = buf360
del buf360
buf368 = buf356
del buf356
buf372 = buf352
del buf352
buf380 = buf348
del buf348
buf384 = buf346
del buf346
buf388 = buf342
del buf342
triton_poi_fused_minimum_neg_2[grid(256)](buf354, buf362, buf358,
buf364, buf368, buf372, buf380, buf384, buf388, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf365 = torch.ops.aten.max_pool3d_with_indices.default(buf364, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf364
buf366 = buf365[0]
del buf365
buf369 = torch.ops.aten.max_pool3d_with_indices.default(buf368, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf368
buf370 = buf369[0]
del buf369
buf373 = torch.ops.aten.max_pool3d_with_indices.default(buf372, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf372
buf374 = buf373[0]
del buf373
buf376 = buf366
del buf366
triton_poi_fused_minimum_neg_3[grid(256)](buf376, buf374, buf370,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf377 = torch.ops.aten.max_pool3d_with_indices.default(buf376, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf378 = buf377[0]
del buf377
buf381 = torch.ops.aten.max_pool3d_with_indices.default(buf380, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf382 = buf381[0]
del buf381
buf385 = torch.ops.aten.max_pool3d_with_indices.default(buf384, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf386 = buf385[0]
del buf385
buf389 = torch.ops.aten.max_pool3d_with_indices.default(buf388, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf390 = buf389[0]
del buf389
buf392 = buf388
del buf388
buf396 = buf384
del buf384
buf400 = buf380
del buf380
buf408 = buf376
del buf376
buf412 = buf374
del buf374
buf416 = buf370
del buf370
triton_poi_fused_minimum_neg_2[grid(256)](buf382, buf390, buf386,
buf392, buf396, buf400, buf408, buf412, buf416, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf393 = torch.ops.aten.max_pool3d_with_indices.default(buf392, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf392
buf394 = buf393[0]
del buf393
buf397 = torch.ops.aten.max_pool3d_with_indices.default(buf396, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf396
buf398 = buf397[0]
del buf397
buf401 = torch.ops.aten.max_pool3d_with_indices.default(buf400, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf400
buf402 = buf401[0]
del buf401
buf404 = buf394
del buf394
triton_poi_fused_minimum_neg_3[grid(256)](buf404, buf402, buf398,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf405 = torch.ops.aten.max_pool3d_with_indices.default(buf404, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf406 = buf405[0]
del buf405
buf409 = torch.ops.aten.max_pool3d_with_indices.default(buf408, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf410 = buf409[0]
del buf409
buf413 = torch.ops.aten.max_pool3d_with_indices.default(buf412, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf414 = buf413[0]
del buf413
buf417 = torch.ops.aten.max_pool3d_with_indices.default(buf416, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf418 = buf417[0]
del buf417
buf420 = buf416
del buf416
buf424 = buf412
del buf412
buf428 = buf408
del buf408
buf436 = buf404
del buf404
buf440 = buf402
del buf402
buf444 = buf398
del buf398
triton_poi_fused_minimum_neg_2[grid(256)](buf410, buf418, buf414,
buf420, buf424, buf428, buf436, buf440, buf444, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf421 = torch.ops.aten.max_pool3d_with_indices.default(buf420, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf420
buf422 = buf421[0]
del buf421
buf425 = torch.ops.aten.max_pool3d_with_indices.default(buf424, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf424
buf426 = buf425[0]
del buf425
buf429 = torch.ops.aten.max_pool3d_with_indices.default(buf428, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf428
buf430 = buf429[0]
del buf429
buf432 = buf422
del buf422
triton_poi_fused_minimum_neg_3[grid(256)](buf432, buf430, buf426,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf433 = torch.ops.aten.max_pool3d_with_indices.default(buf432, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf434 = buf433[0]
del buf433
buf437 = torch.ops.aten.max_pool3d_with_indices.default(buf436, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf438 = buf437[0]
del buf437
buf441 = torch.ops.aten.max_pool3d_with_indices.default(buf440, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf442 = buf441[0]
del buf441
buf445 = torch.ops.aten.max_pool3d_with_indices.default(buf444, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf446 = buf445[0]
del buf445
buf448 = buf444
del buf444
buf452 = buf440
del buf440
buf456 = buf436
del buf436
buf464 = buf432
del buf432
buf468 = buf430
del buf430
buf472 = buf426
del buf426
triton_poi_fused_minimum_neg_2[grid(256)](buf438, buf446, buf442,
buf448, buf452, buf456, buf464, buf468, buf472, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf449 = torch.ops.aten.max_pool3d_with_indices.default(buf448, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf448
buf450 = buf449[0]
del buf449
buf453 = torch.ops.aten.max_pool3d_with_indices.default(buf452, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf452
buf454 = buf453[0]
del buf453
buf457 = torch.ops.aten.max_pool3d_with_indices.default(buf456, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf456
buf458 = buf457[0]
del buf457
buf460 = buf450
del buf450
triton_poi_fused_minimum_neg_3[grid(256)](buf460, buf458, buf454,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf461 = torch.ops.aten.max_pool3d_with_indices.default(buf460, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf462 = buf461[0]
del buf461
buf465 = torch.ops.aten.max_pool3d_with_indices.default(buf464, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf466 = buf465[0]
del buf465
buf469 = torch.ops.aten.max_pool3d_with_indices.default(buf468, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf470 = buf469[0]
del buf469
buf473 = torch.ops.aten.max_pool3d_with_indices.default(buf472, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf474 = buf473[0]
del buf473
buf476 = buf472
del buf472
buf480 = buf468
del buf468
buf484 = buf464
del buf464
buf492 = buf460
del buf460
buf496 = buf458
del buf458
buf500 = buf454
del buf454
triton_poi_fused_minimum_neg_2[grid(256)](buf466, buf474, buf470,
buf476, buf480, buf484, buf492, buf496, buf500, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf477 = torch.ops.aten.max_pool3d_with_indices.default(buf476, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf476
buf478 = buf477[0]
del buf477
buf481 = torch.ops.aten.max_pool3d_with_indices.default(buf480, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf480
buf482 = buf481[0]
del buf481
buf485 = torch.ops.aten.max_pool3d_with_indices.default(buf484, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf484
buf486 = buf485[0]
del buf485
buf488 = buf478
del buf478
triton_poi_fused_minimum_neg_3[grid(256)](buf488, buf486, buf482,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf489 = torch.ops.aten.max_pool3d_with_indices.default(buf488, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf490 = buf489[0]
del buf489
buf493 = torch.ops.aten.max_pool3d_with_indices.default(buf492, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf494 = buf493[0]
del buf493
buf497 = torch.ops.aten.max_pool3d_with_indices.default(buf496, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf498 = buf497[0]
del buf497
buf501 = torch.ops.aten.max_pool3d_with_indices.default(buf500, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf502 = buf501[0]
del buf501
buf504 = buf500
del buf500
buf508 = buf496
del buf496
buf512 = buf492
del buf492
buf520 = buf488
del buf488
buf524 = buf486
del buf486
buf528 = buf482
del buf482
triton_poi_fused_minimum_neg_2[grid(256)](buf494, buf502, buf498,
buf504, buf508, buf512, buf520, buf524, buf528, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf505 = torch.ops.aten.max_pool3d_with_indices.default(buf504, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf504
buf506 = buf505[0]
del buf505
buf509 = torch.ops.aten.max_pool3d_with_indices.default(buf508, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf508
buf510 = buf509[0]
del buf509
buf513 = torch.ops.aten.max_pool3d_with_indices.default(buf512, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf512
buf514 = buf513[0]
del buf513
buf516 = buf506
del buf506
triton_poi_fused_minimum_neg_3[grid(256)](buf516, buf514, buf510,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf517 = torch.ops.aten.max_pool3d_with_indices.default(buf516, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf518 = buf517[0]
del buf517
buf521 = torch.ops.aten.max_pool3d_with_indices.default(buf520, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf522 = buf521[0]
del buf521
buf525 = torch.ops.aten.max_pool3d_with_indices.default(buf524, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf526 = buf525[0]
del buf525
buf529 = torch.ops.aten.max_pool3d_with_indices.default(buf528, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf530 = buf529[0]
del buf529
buf532 = buf528
del buf528
buf536 = buf524
del buf524
buf540 = buf520
del buf520
buf548 = buf516
del buf516
buf552 = buf514
del buf514
buf556 = buf510
del buf510
triton_poi_fused_minimum_neg_2[grid(256)](buf522, buf530, buf526,
buf532, buf536, buf540, buf548, buf552, buf556, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf533 = torch.ops.aten.max_pool3d_with_indices.default(buf532, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf532
buf534 = buf533[0]
del buf533
buf537 = torch.ops.aten.max_pool3d_with_indices.default(buf536, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf536
buf538 = buf537[0]
del buf537
buf541 = torch.ops.aten.max_pool3d_with_indices.default(buf540, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf540
buf542 = buf541[0]
del buf541
buf544 = buf534
del buf534
triton_poi_fused_minimum_neg_3[grid(256)](buf544, buf542, buf538,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf545 = torch.ops.aten.max_pool3d_with_indices.default(buf544, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf546 = buf545[0]
del buf545
buf549 = torch.ops.aten.max_pool3d_with_indices.default(buf548, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf550 = buf549[0]
del buf549
buf553 = torch.ops.aten.max_pool3d_with_indices.default(buf552, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf554 = buf553[0]
del buf553
buf557 = torch.ops.aten.max_pool3d_with_indices.default(buf556, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf558 = buf557[0]
del buf557
buf560 = buf556
del buf556
buf564 = buf552
del buf552
buf568 = buf548
del buf548
buf576 = buf544
del buf544
buf580 = buf542
del buf542
buf584 = buf538
del buf538
triton_poi_fused_minimum_neg_2[grid(256)](buf550, buf558, buf554,
buf560, buf564, buf568, buf576, buf580, buf584, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf561 = torch.ops.aten.max_pool3d_with_indices.default(buf560, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf560
buf562 = buf561[0]
del buf561
buf565 = torch.ops.aten.max_pool3d_with_indices.default(buf564, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf564
buf566 = buf565[0]
del buf565
buf569 = torch.ops.aten.max_pool3d_with_indices.default(buf568, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf568
buf570 = buf569[0]
del buf569
buf572 = buf562
del buf562
triton_poi_fused_minimum_neg_3[grid(256)](buf572, buf570, buf566,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf573 = torch.ops.aten.max_pool3d_with_indices.default(buf572, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf574 = buf573[0]
del buf573
buf577 = torch.ops.aten.max_pool3d_with_indices.default(buf576, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf578 = buf577[0]
del buf577
buf581 = torch.ops.aten.max_pool3d_with_indices.default(buf580, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf582 = buf581[0]
del buf581
buf585 = torch.ops.aten.max_pool3d_with_indices.default(buf584, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf586 = buf585[0]
del buf585
buf588 = buf584
del buf584
buf592 = buf580
del buf580
buf596 = buf576
del buf576
buf604 = buf572
del buf572
buf608 = buf570
del buf570
buf612 = buf566
del buf566
triton_poi_fused_minimum_neg_2[grid(256)](buf578, buf586, buf582,
buf588, buf592, buf596, buf604, buf608, buf612, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf589 = torch.ops.aten.max_pool3d_with_indices.default(buf588, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf588
buf590 = buf589[0]
del buf589
buf593 = torch.ops.aten.max_pool3d_with_indices.default(buf592, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf592
buf594 = buf593[0]
del buf593
buf597 = torch.ops.aten.max_pool3d_with_indices.default(buf596, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf596
buf598 = buf597[0]
del buf597
buf600 = buf590
del buf590
triton_poi_fused_minimum_neg_3[grid(256)](buf600, buf598, buf594,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf601 = torch.ops.aten.max_pool3d_with_indices.default(buf600, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf602 = buf601[0]
del buf601
buf605 = torch.ops.aten.max_pool3d_with_indices.default(buf604, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf606 = buf605[0]
del buf605
buf609 = torch.ops.aten.max_pool3d_with_indices.default(buf608, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf610 = buf609[0]
del buf609
buf613 = torch.ops.aten.max_pool3d_with_indices.default(buf612, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf614 = buf613[0]
del buf613
buf616 = buf612
del buf612
buf620 = buf608
del buf608
buf624 = buf604
del buf604
buf632 = buf600
del buf600
buf636 = buf598
del buf598
buf640 = buf594
del buf594
triton_poi_fused_minimum_neg_2[grid(256)](buf606, buf614, buf610,
buf616, buf620, buf624, buf632, buf636, buf640, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf617 = torch.ops.aten.max_pool3d_with_indices.default(buf616, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf616
buf618 = buf617[0]
del buf617
buf621 = torch.ops.aten.max_pool3d_with_indices.default(buf620, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf620
buf622 = buf621[0]
del buf621
buf625 = torch.ops.aten.max_pool3d_with_indices.default(buf624, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf624
buf626 = buf625[0]
del buf625
buf628 = buf618
del buf618
triton_poi_fused_minimum_neg_3[grid(256)](buf628, buf626, buf622,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf629 = torch.ops.aten.max_pool3d_with_indices.default(buf628, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf630 = buf629[0]
del buf629
buf633 = torch.ops.aten.max_pool3d_with_indices.default(buf632, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf634 = buf633[0]
del buf633
buf637 = torch.ops.aten.max_pool3d_with_indices.default(buf636, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf638 = buf637[0]
del buf637
buf641 = torch.ops.aten.max_pool3d_with_indices.default(buf640, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf642 = buf641[0]
del buf641
buf644 = buf640
del buf640
buf648 = buf636
del buf636
buf652 = buf632
del buf632
buf660 = buf628
del buf628
buf664 = buf626
del buf626
buf668 = buf622
del buf622
triton_poi_fused_minimum_neg_2[grid(256)](buf634, buf642, buf638,
buf644, buf648, buf652, buf660, buf664, buf668, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf645 = torch.ops.aten.max_pool3d_with_indices.default(buf644, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf644
buf646 = buf645[0]
del buf645
buf649 = torch.ops.aten.max_pool3d_with_indices.default(buf648, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf648
buf650 = buf649[0]
del buf649
buf653 = torch.ops.aten.max_pool3d_with_indices.default(buf652, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf652
buf654 = buf653[0]
del buf653
buf656 = buf646
del buf646
triton_poi_fused_minimum_neg_3[grid(256)](buf656, buf654, buf650,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf657 = torch.ops.aten.max_pool3d_with_indices.default(buf656, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf658 = buf657[0]
del buf657
buf661 = torch.ops.aten.max_pool3d_with_indices.default(buf660, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf662 = buf661[0]
del buf661
buf665 = torch.ops.aten.max_pool3d_with_indices.default(buf664, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf666 = buf665[0]
del buf665
buf669 = torch.ops.aten.max_pool3d_with_indices.default(buf668, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf670 = buf669[0]
del buf669
buf672 = buf668
del buf668
buf676 = buf664
del buf664
buf680 = buf660
del buf660
buf688 = buf656
del buf656
buf692 = buf654
del buf654
buf696 = buf650
del buf650
triton_poi_fused_minimum_neg_2[grid(256)](buf662, buf670, buf666,
buf672, buf676, buf680, buf688, buf692, buf696, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf673 = torch.ops.aten.max_pool3d_with_indices.default(buf672, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf672
buf674 = buf673[0]
del buf673
buf677 = torch.ops.aten.max_pool3d_with_indices.default(buf676, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf676
buf678 = buf677[0]
del buf677
buf681 = torch.ops.aten.max_pool3d_with_indices.default(buf680, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf680
buf682 = buf681[0]
del buf681
buf684 = buf674
del buf674
triton_poi_fused_minimum_neg_3[grid(256)](buf684, buf682, buf678,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf685 = torch.ops.aten.max_pool3d_with_indices.default(buf684, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf686 = buf685[0]
del buf685
buf689 = torch.ops.aten.max_pool3d_with_indices.default(buf688, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf690 = buf689[0]
del buf689
buf693 = torch.ops.aten.max_pool3d_with_indices.default(buf692, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf694 = buf693[0]
del buf693
buf697 = torch.ops.aten.max_pool3d_with_indices.default(buf696, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf698 = buf697[0]
del buf697
buf700 = buf696
del buf696
buf704 = buf692
del buf692
buf708 = buf688
del buf688
buf716 = buf684
del buf684
buf720 = buf682
del buf682
buf724 = buf678
del buf678
triton_poi_fused_minimum_neg_2[grid(256)](buf690, buf698, buf694,
buf700, buf704, buf708, buf716, buf720, buf724, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf701 = torch.ops.aten.max_pool3d_with_indices.default(buf700, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf700
buf702 = buf701[0]
del buf701
buf705 = torch.ops.aten.max_pool3d_with_indices.default(buf704, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf704
buf706 = buf705[0]
del buf705
buf709 = torch.ops.aten.max_pool3d_with_indices.default(buf708, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf708
buf710 = buf709[0]
del buf709
buf712 = buf702
del buf702
triton_poi_fused_minimum_neg_3[grid(256)](buf712, buf710, buf706,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf713 = torch.ops.aten.max_pool3d_with_indices.default(buf712, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf714 = buf713[0]
del buf713
buf717 = torch.ops.aten.max_pool3d_with_indices.default(buf716, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf718 = buf717[0]
del buf717
buf721 = torch.ops.aten.max_pool3d_with_indices.default(buf720, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf722 = buf721[0]
del buf721
buf725 = torch.ops.aten.max_pool3d_with_indices.default(buf724, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf726 = buf725[0]
del buf725
buf728 = buf724
del buf724
buf732 = buf720
del buf720
buf736 = buf716
del buf716
buf744 = buf712
del buf712
buf748 = buf710
del buf710
buf752 = buf706
del buf706
triton_poi_fused_minimum_neg_2[grid(256)](buf718, buf726, buf722,
buf728, buf732, buf736, buf744, buf748, buf752, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf729 = torch.ops.aten.max_pool3d_with_indices.default(buf728, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf728
buf730 = buf729[0]
del buf729
buf733 = torch.ops.aten.max_pool3d_with_indices.default(buf732, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf732
buf734 = buf733[0]
del buf733
buf737 = torch.ops.aten.max_pool3d_with_indices.default(buf736, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf736
buf738 = buf737[0]
del buf737
buf740 = buf730
del buf730
triton_poi_fused_minimum_neg_3[grid(256)](buf740, buf738, buf734,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf741 = torch.ops.aten.max_pool3d_with_indices.default(buf740, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf742 = buf741[0]
del buf741
buf745 = torch.ops.aten.max_pool3d_with_indices.default(buf744, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf746 = buf745[0]
del buf745
buf749 = torch.ops.aten.max_pool3d_with_indices.default(buf748, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf750 = buf749[0]
del buf749
buf753 = torch.ops.aten.max_pool3d_with_indices.default(buf752, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf754 = buf753[0]
del buf753
buf756 = buf752
del buf752
buf760 = buf748
del buf748
buf764 = buf744
del buf744
buf772 = buf740
del buf740
buf776 = buf738
del buf738
buf780 = buf734
del buf734
triton_poi_fused_minimum_neg_2[grid(256)](buf746, buf754, buf750,
buf756, buf760, buf764, buf772, buf776, buf780, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf757 = torch.ops.aten.max_pool3d_with_indices.default(buf756, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf756
buf758 = buf757[0]
del buf757
buf761 = torch.ops.aten.max_pool3d_with_indices.default(buf760, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf760
buf762 = buf761[0]
del buf761
buf765 = torch.ops.aten.max_pool3d_with_indices.default(buf764, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf764
buf766 = buf765[0]
del buf765
buf768 = buf758
del buf758
triton_poi_fused_minimum_neg_3[grid(256)](buf768, buf766, buf762,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf769 = torch.ops.aten.max_pool3d_with_indices.default(buf768, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf770 = buf769[0]
del buf769
buf773 = torch.ops.aten.max_pool3d_with_indices.default(buf772, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf774 = buf773[0]
del buf773
buf777 = torch.ops.aten.max_pool3d_with_indices.default(buf776, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf778 = buf777[0]
del buf777
buf781 = torch.ops.aten.max_pool3d_with_indices.default(buf780, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf782 = buf781[0]
del buf781
buf784 = buf780
del buf780
buf788 = buf776
del buf776
buf792 = buf772
del buf772
buf800 = buf768
del buf768
buf804 = buf766
del buf766
buf808 = buf762
del buf762
triton_poi_fused_minimum_neg_2[grid(256)](buf774, buf782, buf778,
buf784, buf788, buf792, buf800, buf804, buf808, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf785 = torch.ops.aten.max_pool3d_with_indices.default(buf784, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf784
buf786 = buf785[0]
del buf785
buf789 = torch.ops.aten.max_pool3d_with_indices.default(buf788, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf788
buf790 = buf789[0]
del buf789
buf793 = torch.ops.aten.max_pool3d_with_indices.default(buf792, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf792
buf794 = buf793[0]
del buf793
buf796 = buf786
del buf786
triton_poi_fused_minimum_neg_3[grid(256)](buf796, buf794, buf790,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf797 = torch.ops.aten.max_pool3d_with_indices.default(buf796, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf798 = buf797[0]
del buf797
buf801 = torch.ops.aten.max_pool3d_with_indices.default(buf800, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf802 = buf801[0]
del buf801
buf805 = torch.ops.aten.max_pool3d_with_indices.default(buf804, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf806 = buf805[0]
del buf805
buf809 = torch.ops.aten.max_pool3d_with_indices.default(buf808, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf810 = buf809[0]
del buf809
buf812 = buf808
del buf808
buf816 = buf804
del buf804
buf820 = buf800
del buf800
buf828 = buf796
del buf796
buf832 = buf794
del buf794
buf836 = buf790
del buf790
triton_poi_fused_minimum_neg_2[grid(256)](buf802, buf810, buf806,
buf812, buf816, buf820, buf828, buf832, buf836, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf813 = torch.ops.aten.max_pool3d_with_indices.default(buf812, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf812
buf814 = buf813[0]
del buf813
buf817 = torch.ops.aten.max_pool3d_with_indices.default(buf816, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf816
buf818 = buf817[0]
del buf817
buf821 = torch.ops.aten.max_pool3d_with_indices.default(buf820, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf820
buf822 = buf821[0]
del buf821
buf824 = buf814
del buf814
triton_poi_fused_minimum_neg_3[grid(256)](buf824, buf822, buf818,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf825 = torch.ops.aten.max_pool3d_with_indices.default(buf824, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf826 = buf825[0]
del buf825
buf829 = torch.ops.aten.max_pool3d_with_indices.default(buf828, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf830 = buf829[0]
del buf829
buf833 = torch.ops.aten.max_pool3d_with_indices.default(buf832, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf834 = buf833[0]
del buf833
buf837 = torch.ops.aten.max_pool3d_with_indices.default(buf836, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf838 = buf837[0]
del buf837
buf840 = buf836
del buf836
buf844 = buf832
del buf832
buf848 = buf828
del buf828
buf856 = buf824
del buf824
buf860 = buf822
del buf822
buf864 = buf818
del buf818
triton_poi_fused_minimum_neg_2[grid(256)](buf830, buf838, buf834,
buf840, buf844, buf848, buf856, buf860, buf864, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf841 = torch.ops.aten.max_pool3d_with_indices.default(buf840, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf840
buf842 = buf841[0]
del buf841
buf845 = torch.ops.aten.max_pool3d_with_indices.default(buf844, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf844
buf846 = buf845[0]
del buf845
buf849 = torch.ops.aten.max_pool3d_with_indices.default(buf848, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf848
buf850 = buf849[0]
del buf849
buf852 = buf842
del buf842
triton_poi_fused_minimum_neg_3[grid(256)](buf852, buf850, buf846,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf853 = torch.ops.aten.max_pool3d_with_indices.default(buf852, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf854 = buf853[0]
del buf853
buf857 = torch.ops.aten.max_pool3d_with_indices.default(buf856, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf858 = buf857[0]
del buf857
buf861 = torch.ops.aten.max_pool3d_with_indices.default(buf860, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf862 = buf861[0]
del buf861
buf865 = torch.ops.aten.max_pool3d_with_indices.default(buf864, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf866 = buf865[0]
del buf865
buf868 = buf864
del buf864
buf872 = buf860
del buf860
buf876 = buf856
del buf856
buf884 = buf852
del buf852
buf888 = buf850
del buf850
buf892 = buf846
del buf846
triton_poi_fused_minimum_neg_2[grid(256)](buf858, buf866, buf862,
buf868, buf872, buf876, buf884, buf888, buf892, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf869 = torch.ops.aten.max_pool3d_with_indices.default(buf868, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf868
buf870 = buf869[0]
del buf869
buf873 = torch.ops.aten.max_pool3d_with_indices.default(buf872, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf872
buf874 = buf873[0]
del buf873
buf877 = torch.ops.aten.max_pool3d_with_indices.default(buf876, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf876
buf878 = buf877[0]
del buf877
buf880 = buf870
del buf870
triton_poi_fused_minimum_neg_3[grid(256)](buf880, buf878, buf874,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf881 = torch.ops.aten.max_pool3d_with_indices.default(buf880, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf882 = buf881[0]
del buf881
buf885 = torch.ops.aten.max_pool3d_with_indices.default(buf884, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf886 = buf885[0]
del buf885
buf889 = torch.ops.aten.max_pool3d_with_indices.default(buf888, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf890 = buf889[0]
del buf889
buf893 = torch.ops.aten.max_pool3d_with_indices.default(buf892, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf894 = buf893[0]
del buf893
buf896 = buf892
del buf892
buf900 = buf888
del buf888
buf904 = buf884
del buf884
buf912 = buf880
del buf880
buf916 = buf878
del buf878
buf920 = buf874
del buf874
triton_poi_fused_minimum_neg_2[grid(256)](buf886, buf894, buf890,
buf896, buf900, buf904, buf912, buf916, buf920, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf897 = torch.ops.aten.max_pool3d_with_indices.default(buf896, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf896
buf898 = buf897[0]
del buf897
buf901 = torch.ops.aten.max_pool3d_with_indices.default(buf900, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf900
buf902 = buf901[0]
del buf901
buf905 = torch.ops.aten.max_pool3d_with_indices.default(buf904, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf904
buf906 = buf905[0]
del buf905
buf908 = buf898
del buf898
triton_poi_fused_minimum_neg_3[grid(256)](buf908, buf906, buf902,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf909 = torch.ops.aten.max_pool3d_with_indices.default(buf908, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf910 = buf909[0]
del buf909
buf913 = torch.ops.aten.max_pool3d_with_indices.default(buf912, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf914 = buf913[0]
del buf913
buf917 = torch.ops.aten.max_pool3d_with_indices.default(buf916, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf918 = buf917[0]
del buf917
buf921 = torch.ops.aten.max_pool3d_with_indices.default(buf920, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf922 = buf921[0]
del buf921
buf924 = buf920
del buf920
buf928 = buf916
del buf916
buf932 = buf912
del buf912
buf940 = buf908
del buf908
buf944 = buf906
del buf906
buf948 = buf902
del buf902
triton_poi_fused_minimum_neg_2[grid(256)](buf914, buf922, buf918,
buf924, buf928, buf932, buf940, buf944, buf948, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf925 = torch.ops.aten.max_pool3d_with_indices.default(buf924, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf924
buf926 = buf925[0]
del buf925
buf929 = torch.ops.aten.max_pool3d_with_indices.default(buf928, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf928
buf930 = buf929[0]
del buf929
buf933 = torch.ops.aten.max_pool3d_with_indices.default(buf932, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf932
buf934 = buf933[0]
del buf933
buf936 = buf926
del buf926
triton_poi_fused_minimum_neg_3[grid(256)](buf936, buf934, buf930,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf937 = torch.ops.aten.max_pool3d_with_indices.default(buf936, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf938 = buf937[0]
del buf937
buf941 = torch.ops.aten.max_pool3d_with_indices.default(buf940, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf942 = buf941[0]
del buf941
buf945 = torch.ops.aten.max_pool3d_with_indices.default(buf944, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf946 = buf945[0]
del buf945
buf949 = torch.ops.aten.max_pool3d_with_indices.default(buf948, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf950 = buf949[0]
del buf949
buf952 = buf948
del buf948
buf956 = buf944
del buf944
buf960 = buf940
del buf940
buf968 = buf936
del buf936
buf972 = buf934
del buf934
buf976 = buf930
del buf930
triton_poi_fused_minimum_neg_2[grid(256)](buf942, buf950, buf946,
buf952, buf956, buf960, buf968, buf972, buf976, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf953 = torch.ops.aten.max_pool3d_with_indices.default(buf952, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf952
buf954 = buf953[0]
del buf953
buf957 = torch.ops.aten.max_pool3d_with_indices.default(buf956, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf956
buf958 = buf957[0]
del buf957
buf961 = torch.ops.aten.max_pool3d_with_indices.default(buf960, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf960
buf962 = buf961[0]
del buf961
buf964 = buf954
del buf954
triton_poi_fused_minimum_neg_3[grid(256)](buf964, buf962, buf958,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf965 = torch.ops.aten.max_pool3d_with_indices.default(buf964, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf966 = buf965[0]
del buf965
buf969 = torch.ops.aten.max_pool3d_with_indices.default(buf968, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf970 = buf969[0]
del buf969
buf973 = torch.ops.aten.max_pool3d_with_indices.default(buf972, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf974 = buf973[0]
del buf973
buf977 = torch.ops.aten.max_pool3d_with_indices.default(buf976, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf978 = buf977[0]
del buf977
buf980 = buf976
del buf976
buf984 = buf972
del buf972
buf988 = buf968
del buf968
buf996 = buf964
del buf964
buf1000 = buf962
del buf962
buf1004 = buf958
del buf958
triton_poi_fused_minimum_neg_2[grid(256)](buf970, buf978, buf974,
buf980, buf984, buf988, buf996, buf1000, buf1004, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf981 = torch.ops.aten.max_pool3d_with_indices.default(buf980, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf980
buf982 = buf981[0]
del buf981
buf985 = torch.ops.aten.max_pool3d_with_indices.default(buf984, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf984
buf986 = buf985[0]
del buf985
buf989 = torch.ops.aten.max_pool3d_with_indices.default(buf988, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf988
buf990 = buf989[0]
del buf989
buf992 = buf982
del buf982
triton_poi_fused_minimum_neg_3[grid(256)](buf992, buf990, buf986,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf993 = torch.ops.aten.max_pool3d_with_indices.default(buf992, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf994 = buf993[0]
del buf993
buf997 = torch.ops.aten.max_pool3d_with_indices.default(buf996, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf998 = buf997[0]
del buf997
buf1001 = torch.ops.aten.max_pool3d_with_indices.default(buf1000, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1002 = buf1001[0]
del buf1001
buf1005 = torch.ops.aten.max_pool3d_with_indices.default(buf1004, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1006 = buf1005[0]
del buf1005
buf1008 = buf1004
del buf1004
buf1012 = buf1000
del buf1000
buf1016 = buf996
del buf996
buf1024 = buf992
del buf992
buf1028 = buf990
del buf990
buf1032 = buf986
del buf986
triton_poi_fused_minimum_neg_2[grid(256)](buf998, buf1006, buf1002,
buf1008, buf1012, buf1016, buf1024, buf1028, buf1032, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1009 = torch.ops.aten.max_pool3d_with_indices.default(buf1008, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1008
buf1010 = buf1009[0]
del buf1009
buf1013 = torch.ops.aten.max_pool3d_with_indices.default(buf1012, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1012
buf1014 = buf1013[0]
del buf1013
buf1017 = torch.ops.aten.max_pool3d_with_indices.default(buf1016, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1016
buf1018 = buf1017[0]
del buf1017
buf1020 = buf1010
del buf1010
triton_poi_fused_minimum_neg_3[grid(256)](buf1020, buf1018, buf1014,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1021 = torch.ops.aten.max_pool3d_with_indices.default(buf1020, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1022 = buf1021[0]
del buf1021
buf1025 = torch.ops.aten.max_pool3d_with_indices.default(buf1024, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1026 = buf1025[0]
del buf1025
buf1029 = torch.ops.aten.max_pool3d_with_indices.default(buf1028, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1030 = buf1029[0]
del buf1029
buf1033 = torch.ops.aten.max_pool3d_with_indices.default(buf1032, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1034 = buf1033[0]
del buf1033
buf1036 = buf1032
del buf1032
buf1040 = buf1028
del buf1028
buf1044 = buf1024
del buf1024
buf1052 = buf1020
del buf1020
buf1056 = buf1018
del buf1018
buf1060 = buf1014
del buf1014
triton_poi_fused_minimum_neg_2[grid(256)](buf1026, buf1034, buf1030,
buf1036, buf1040, buf1044, buf1052, buf1056, buf1060, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1037 = torch.ops.aten.max_pool3d_with_indices.default(buf1036, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1036
buf1038 = buf1037[0]
del buf1037
buf1041 = torch.ops.aten.max_pool3d_with_indices.default(buf1040, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1040
buf1042 = buf1041[0]
del buf1041
buf1045 = torch.ops.aten.max_pool3d_with_indices.default(buf1044, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1044
buf1046 = buf1045[0]
del buf1045
buf1048 = buf1038
del buf1038
triton_poi_fused_minimum_neg_3[grid(256)](buf1048, buf1046, buf1042,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1049 = torch.ops.aten.max_pool3d_with_indices.default(buf1048, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1050 = buf1049[0]
del buf1049
buf1053 = torch.ops.aten.max_pool3d_with_indices.default(buf1052, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1054 = buf1053[0]
del buf1053
buf1057 = torch.ops.aten.max_pool3d_with_indices.default(buf1056, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1058 = buf1057[0]
del buf1057
buf1061 = torch.ops.aten.max_pool3d_with_indices.default(buf1060, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1062 = buf1061[0]
del buf1061
buf1064 = buf1060
del buf1060
buf1068 = buf1056
del buf1056
buf1072 = buf1052
del buf1052
buf1080 = buf1048
del buf1048
buf1084 = buf1046
del buf1046
buf1088 = buf1042
del buf1042
triton_poi_fused_minimum_neg_2[grid(256)](buf1054, buf1062, buf1058,
buf1064, buf1068, buf1072, buf1080, buf1084, buf1088, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1065 = torch.ops.aten.max_pool3d_with_indices.default(buf1064, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1064
buf1066 = buf1065[0]
del buf1065
buf1069 = torch.ops.aten.max_pool3d_with_indices.default(buf1068, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1068
buf1070 = buf1069[0]
del buf1069
buf1073 = torch.ops.aten.max_pool3d_with_indices.default(buf1072, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1072
buf1074 = buf1073[0]
del buf1073
buf1076 = buf1066
del buf1066
triton_poi_fused_minimum_neg_3[grid(256)](buf1076, buf1074, buf1070,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1077 = torch.ops.aten.max_pool3d_with_indices.default(buf1076, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1078 = buf1077[0]
del buf1077
buf1081 = torch.ops.aten.max_pool3d_with_indices.default(buf1080, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1082 = buf1081[0]
del buf1081
buf1085 = torch.ops.aten.max_pool3d_with_indices.default(buf1084, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1086 = buf1085[0]
del buf1085
buf1089 = torch.ops.aten.max_pool3d_with_indices.default(buf1088, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1090 = buf1089[0]
del buf1089
buf1092 = buf1088
del buf1088
buf1096 = buf1084
del buf1084
buf1100 = buf1080
del buf1080
buf1108 = buf1076
del buf1076
buf1112 = buf1074
del buf1074
buf1116 = buf1070
del buf1070
triton_poi_fused_minimum_neg_2[grid(256)](buf1082, buf1090, buf1086,
buf1092, buf1096, buf1100, buf1108, buf1112, buf1116, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1093 = torch.ops.aten.max_pool3d_with_indices.default(buf1092, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1092
buf1094 = buf1093[0]
del buf1093
buf1097 = torch.ops.aten.max_pool3d_with_indices.default(buf1096, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1096
buf1098 = buf1097[0]
del buf1097
buf1101 = torch.ops.aten.max_pool3d_with_indices.default(buf1100, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1100
buf1102 = buf1101[0]
del buf1101
buf1104 = buf1094
del buf1094
triton_poi_fused_minimum_neg_3[grid(256)](buf1104, buf1102, buf1098,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1105 = torch.ops.aten.max_pool3d_with_indices.default(buf1104, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1106 = buf1105[0]
del buf1105
buf1109 = torch.ops.aten.max_pool3d_with_indices.default(buf1108, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1110 = buf1109[0]
del buf1109
buf1113 = torch.ops.aten.max_pool3d_with_indices.default(buf1112, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1114 = buf1113[0]
del buf1113
buf1117 = torch.ops.aten.max_pool3d_with_indices.default(buf1116, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1118 = buf1117[0]
del buf1117
buf1120 = buf1116
del buf1116
buf1124 = buf1112
del buf1112
buf1128 = buf1108
del buf1108
buf1136 = buf1104
del buf1104
buf1140 = buf1102
del buf1102
buf1144 = buf1098
del buf1098
triton_poi_fused_minimum_neg_2[grid(256)](buf1110, buf1118, buf1114,
buf1120, buf1124, buf1128, buf1136, buf1140, buf1144, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1121 = torch.ops.aten.max_pool3d_with_indices.default(buf1120, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1120
buf1122 = buf1121[0]
del buf1121
buf1125 = torch.ops.aten.max_pool3d_with_indices.default(buf1124, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1124
buf1126 = buf1125[0]
del buf1125
buf1129 = torch.ops.aten.max_pool3d_with_indices.default(buf1128, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1128
buf1130 = buf1129[0]
del buf1129
buf1132 = buf1122
del buf1122
triton_poi_fused_minimum_neg_3[grid(256)](buf1132, buf1130, buf1126,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1133 = torch.ops.aten.max_pool3d_with_indices.default(buf1132, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1134 = buf1133[0]
del buf1133
buf1137 = torch.ops.aten.max_pool3d_with_indices.default(buf1136, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1138 = buf1137[0]
del buf1137
buf1141 = torch.ops.aten.max_pool3d_with_indices.default(buf1140, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1142 = buf1141[0]
del buf1141
buf1145 = torch.ops.aten.max_pool3d_with_indices.default(buf1144, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1146 = buf1145[0]
del buf1145
buf1148 = buf1144
del buf1144
buf1152 = buf1140
del buf1140
buf1156 = buf1136
del buf1136
buf1164 = buf1132
del buf1132
buf1168 = buf1130
del buf1130
buf1172 = buf1126
del buf1126
triton_poi_fused_minimum_neg_2[grid(256)](buf1138, buf1146, buf1142,
buf1148, buf1152, buf1156, buf1164, buf1168, buf1172, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1149 = torch.ops.aten.max_pool3d_with_indices.default(buf1148, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1148
buf1150 = buf1149[0]
del buf1149
buf1153 = torch.ops.aten.max_pool3d_with_indices.default(buf1152, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1152
buf1154 = buf1153[0]
del buf1153
buf1157 = torch.ops.aten.max_pool3d_with_indices.default(buf1156, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1156
buf1158 = buf1157[0]
del buf1157
buf1160 = buf1150
del buf1150
triton_poi_fused_minimum_neg_3[grid(256)](buf1160, buf1158, buf1154,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1161 = torch.ops.aten.max_pool3d_with_indices.default(buf1160, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1162 = buf1161[0]
del buf1161
buf1165 = torch.ops.aten.max_pool3d_with_indices.default(buf1164, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1166 = buf1165[0]
del buf1165
buf1169 = torch.ops.aten.max_pool3d_with_indices.default(buf1168, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1170 = buf1169[0]
del buf1169
buf1173 = torch.ops.aten.max_pool3d_with_indices.default(buf1172, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1174 = buf1173[0]
del buf1173
buf1176 = buf1172
del buf1172
buf1180 = buf1168
del buf1168
buf1184 = buf1164
del buf1164
buf1192 = buf1160
del buf1160
buf1196 = buf1158
del buf1158
buf1200 = buf1154
del buf1154
triton_poi_fused_minimum_neg_2[grid(256)](buf1166, buf1174, buf1170,
buf1176, buf1180, buf1184, buf1192, buf1196, buf1200, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1177 = torch.ops.aten.max_pool3d_with_indices.default(buf1176, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1176
buf1178 = buf1177[0]
del buf1177
buf1181 = torch.ops.aten.max_pool3d_with_indices.default(buf1180, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1180
buf1182 = buf1181[0]
del buf1181
buf1185 = torch.ops.aten.max_pool3d_with_indices.default(buf1184, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1184
buf1186 = buf1185[0]
del buf1185
buf1188 = buf1178
del buf1178
triton_poi_fused_minimum_neg_3[grid(256)](buf1188, buf1186, buf1182,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1189 = torch.ops.aten.max_pool3d_with_indices.default(buf1188, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1190 = buf1189[0]
del buf1189
buf1193 = torch.ops.aten.max_pool3d_with_indices.default(buf1192, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1194 = buf1193[0]
del buf1193
buf1197 = torch.ops.aten.max_pool3d_with_indices.default(buf1196, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1198 = buf1197[0]
del buf1197
buf1201 = torch.ops.aten.max_pool3d_with_indices.default(buf1200, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1202 = buf1201[0]
del buf1201
buf1204 = buf1200
del buf1200
buf1208 = buf1196
del buf1196
buf1212 = buf1192
del buf1192
buf1220 = buf1188
del buf1188
buf1224 = buf1186
del buf1186
buf1228 = buf1182
del buf1182
triton_poi_fused_minimum_neg_2[grid(256)](buf1194, buf1202, buf1198,
buf1204, buf1208, buf1212, buf1220, buf1224, buf1228, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1205 = torch.ops.aten.max_pool3d_with_indices.default(buf1204, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1204
buf1206 = buf1205[0]
del buf1205
buf1209 = torch.ops.aten.max_pool3d_with_indices.default(buf1208, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1208
buf1210 = buf1209[0]
del buf1209
buf1213 = torch.ops.aten.max_pool3d_with_indices.default(buf1212, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1212
buf1214 = buf1213[0]
del buf1213
buf1216 = buf1206
del buf1206
triton_poi_fused_minimum_neg_3[grid(256)](buf1216, buf1214, buf1210,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1217 = torch.ops.aten.max_pool3d_with_indices.default(buf1216, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1218 = buf1217[0]
del buf1217
buf1221 = torch.ops.aten.max_pool3d_with_indices.default(buf1220, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1222 = buf1221[0]
del buf1221
buf1225 = torch.ops.aten.max_pool3d_with_indices.default(buf1224, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1226 = buf1225[0]
del buf1225
buf1229 = torch.ops.aten.max_pool3d_with_indices.default(buf1228, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1230 = buf1229[0]
del buf1229
buf1232 = buf1228
del buf1228
buf1236 = buf1224
del buf1224
buf1240 = buf1220
del buf1220
buf1248 = buf1216
del buf1216
buf1252 = buf1214
del buf1214
buf1256 = buf1210
del buf1210
triton_poi_fused_minimum_neg_2[grid(256)](buf1222, buf1230, buf1226,
buf1232, buf1236, buf1240, buf1248, buf1252, buf1256, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1233 = torch.ops.aten.max_pool3d_with_indices.default(buf1232, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1232
buf1234 = buf1233[0]
del buf1233
buf1237 = torch.ops.aten.max_pool3d_with_indices.default(buf1236, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1236
buf1238 = buf1237[0]
del buf1237
buf1241 = torch.ops.aten.max_pool3d_with_indices.default(buf1240, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1240
buf1242 = buf1241[0]
del buf1241
buf1244 = buf1234
del buf1234
triton_poi_fused_minimum_neg_3[grid(256)](buf1244, buf1242, buf1238,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1245 = torch.ops.aten.max_pool3d_with_indices.default(buf1244, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1246 = buf1245[0]
del buf1245
buf1249 = torch.ops.aten.max_pool3d_with_indices.default(buf1248, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1250 = buf1249[0]
del buf1249
buf1253 = torch.ops.aten.max_pool3d_with_indices.default(buf1252, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1254 = buf1253[0]
del buf1253
buf1257 = torch.ops.aten.max_pool3d_with_indices.default(buf1256, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1258 = buf1257[0]
del buf1257
buf1260 = buf1256
del buf1256
buf1264 = buf1252
del buf1252
buf1268 = buf1248
del buf1248
buf1276 = buf1244
del buf1244
buf1280 = buf1242
del buf1242
buf1284 = buf1238
del buf1238
triton_poi_fused_minimum_neg_2[grid(256)](buf1250, buf1258, buf1254,
buf1260, buf1264, buf1268, buf1276, buf1280, buf1284, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1261 = torch.ops.aten.max_pool3d_with_indices.default(buf1260, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1260
buf1262 = buf1261[0]
del buf1261
buf1265 = torch.ops.aten.max_pool3d_with_indices.default(buf1264, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1264
buf1266 = buf1265[0]
del buf1265
buf1269 = torch.ops.aten.max_pool3d_with_indices.default(buf1268, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1268
buf1270 = buf1269[0]
del buf1269
buf1272 = buf1262
del buf1262
triton_poi_fused_minimum_neg_3[grid(256)](buf1272, buf1270, buf1266,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1273 = torch.ops.aten.max_pool3d_with_indices.default(buf1272, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1274 = buf1273[0]
del buf1273
buf1277 = torch.ops.aten.max_pool3d_with_indices.default(buf1276, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1278 = buf1277[0]
del buf1277
buf1281 = torch.ops.aten.max_pool3d_with_indices.default(buf1280, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1282 = buf1281[0]
del buf1281
buf1285 = torch.ops.aten.max_pool3d_with_indices.default(buf1284, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1286 = buf1285[0]
del buf1285
buf1288 = buf1284
del buf1284
buf1292 = buf1280
del buf1280
buf1296 = buf1276
del buf1276
buf1304 = buf1272
del buf1272
buf1308 = buf1270
del buf1270
buf1312 = buf1266
del buf1266
triton_poi_fused_minimum_neg_2[grid(256)](buf1278, buf1286, buf1282,
buf1288, buf1292, buf1296, buf1304, buf1308, buf1312, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1289 = torch.ops.aten.max_pool3d_with_indices.default(buf1288, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1288
buf1290 = buf1289[0]
del buf1289
buf1293 = torch.ops.aten.max_pool3d_with_indices.default(buf1292, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1292
buf1294 = buf1293[0]
del buf1293
buf1297 = torch.ops.aten.max_pool3d_with_indices.default(buf1296, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1296
buf1298 = buf1297[0]
del buf1297
buf1300 = buf1290
del buf1290
triton_poi_fused_minimum_neg_3[grid(256)](buf1300, buf1298, buf1294,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1301 = torch.ops.aten.max_pool3d_with_indices.default(buf1300, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1302 = buf1301[0]
del buf1301
buf1305 = torch.ops.aten.max_pool3d_with_indices.default(buf1304, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1306 = buf1305[0]
del buf1305
buf1309 = torch.ops.aten.max_pool3d_with_indices.default(buf1308, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1310 = buf1309[0]
del buf1309
buf1313 = torch.ops.aten.max_pool3d_with_indices.default(buf1312, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1314 = buf1313[0]
del buf1313
buf1316 = buf1312
del buf1312
buf1320 = buf1308
del buf1308
buf1324 = buf1304
del buf1304
buf1332 = buf1300
del buf1300
buf1336 = buf1298
del buf1298
buf1340 = buf1294
del buf1294
triton_poi_fused_minimum_neg_2[grid(256)](buf1306, buf1314, buf1310,
buf1316, buf1320, buf1324, buf1332, buf1336, buf1340, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1317 = torch.ops.aten.max_pool3d_with_indices.default(buf1316, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1316
buf1318 = buf1317[0]
del buf1317
buf1321 = torch.ops.aten.max_pool3d_with_indices.default(buf1320, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1320
buf1322 = buf1321[0]
del buf1321
buf1325 = torch.ops.aten.max_pool3d_with_indices.default(buf1324, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1324
buf1326 = buf1325[0]
del buf1325
buf1328 = buf1318
del buf1318
triton_poi_fused_minimum_neg_3[grid(256)](buf1328, buf1326, buf1322,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1329 = torch.ops.aten.max_pool3d_with_indices.default(buf1328, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1330 = buf1329[0]
del buf1329
buf1333 = torch.ops.aten.max_pool3d_with_indices.default(buf1332, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1334 = buf1333[0]
del buf1333
buf1337 = torch.ops.aten.max_pool3d_with_indices.default(buf1336, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1338 = buf1337[0]
del buf1337
buf1341 = torch.ops.aten.max_pool3d_with_indices.default(buf1340, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1342 = buf1341[0]
del buf1341
buf1344 = buf1340
del buf1340
buf1348 = buf1336
del buf1336
buf1352 = buf1332
del buf1332
buf1360 = buf1328
del buf1328
buf1364 = buf1326
del buf1326
buf1368 = buf1322
del buf1322
triton_poi_fused_minimum_neg_2[grid(256)](buf1334, buf1342, buf1338,
buf1344, buf1348, buf1352, buf1360, buf1364, buf1368, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1345 = torch.ops.aten.max_pool3d_with_indices.default(buf1344, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1344
buf1346 = buf1345[0]
del buf1345
buf1349 = torch.ops.aten.max_pool3d_with_indices.default(buf1348, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1348
buf1350 = buf1349[0]
del buf1349
buf1353 = torch.ops.aten.max_pool3d_with_indices.default(buf1352, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1352
buf1354 = buf1353[0]
del buf1353
buf1356 = buf1346
del buf1346
triton_poi_fused_minimum_neg_3[grid(256)](buf1356, buf1354, buf1350,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1357 = torch.ops.aten.max_pool3d_with_indices.default(buf1356, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1358 = buf1357[0]
del buf1357
buf1361 = torch.ops.aten.max_pool3d_with_indices.default(buf1360, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1362 = buf1361[0]
del buf1361
buf1365 = torch.ops.aten.max_pool3d_with_indices.default(buf1364, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1366 = buf1365[0]
del buf1365
buf1369 = torch.ops.aten.max_pool3d_with_indices.default(buf1368, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1370 = buf1369[0]
del buf1369
buf1372 = buf1368
del buf1368
buf1376 = buf1364
del buf1364
buf1380 = buf1360
del buf1360
buf1388 = buf1356
del buf1356
buf1392 = buf1354
del buf1354
buf1396 = buf1350
del buf1350
triton_poi_fused_minimum_neg_2[grid(256)](buf1362, buf1370, buf1366,
buf1372, buf1376, buf1380, buf1388, buf1392, buf1396, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1373 = torch.ops.aten.max_pool3d_with_indices.default(buf1372, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1372
buf1374 = buf1373[0]
del buf1373
buf1377 = torch.ops.aten.max_pool3d_with_indices.default(buf1376, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1376
buf1378 = buf1377[0]
del buf1377
buf1381 = torch.ops.aten.max_pool3d_with_indices.default(buf1380, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1380
buf1382 = buf1381[0]
del buf1381
buf1384 = buf1374
del buf1374
triton_poi_fused_minimum_neg_3[grid(256)](buf1384, buf1382, buf1378,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1378
del buf1382
buf1385 = torch.ops.aten.max_pool3d_with_indices.default(buf1384, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf1384
buf1386 = buf1385[0]
del buf1385
buf1389 = torch.ops.aten.max_pool3d_with_indices.default(buf1388, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1390 = buf1389[0]
del buf1389
buf1393 = torch.ops.aten.max_pool3d_with_indices.default(buf1392, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1394 = buf1393[0]
del buf1393
buf1397 = torch.ops.aten.max_pool3d_with_indices.default(buf1396, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1398 = buf1397[0]
del buf1397
buf1400 = buf1396
del buf1396
buf1404 = buf1392
del buf1392
buf1408 = buf1388
del buf1388
triton_poi_fused_minimum_neg_4[grid(256)](buf1390, buf1398, buf1394,
buf1400, buf1404, buf1408, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf1401 = torch.ops.aten.max_pool3d_with_indices.default(buf1400, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1402 = buf1401[0]
del buf1401
buf1405 = torch.ops.aten.max_pool3d_with_indices.default(buf1404, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1406 = buf1405[0]
del buf1405
buf1409 = torch.ops.aten.max_pool3d_with_indices.default(buf1408, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1410 = buf1409[0]
del buf1409
buf1412 = buf1402
del buf1402
triton_poi_fused_minimum_neg_3[grid(256)](buf1412, buf1410, buf1406,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1413 = torch.ops.aten.max_pool3d_with_indices.default(buf1412, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1414 = buf1413[0]
del buf1413
buf1416 = buf1412
del buf1412
buf1420 = buf1410
del buf1410
buf1424 = buf1406
del buf1406
buf1432 = buf1408
del buf1408
buf1436 = buf1404
del buf1404
buf1440 = buf1400
del buf1400
triton_poi_fused_neg_0[grid(256)](arg1_1, buf1416, buf1420, buf1424,
buf1432, buf1436, buf1440, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf1417 = torch.ops.aten.max_pool3d_with_indices.default(buf1416, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1416
buf1418 = buf1417[0]
del buf1417
buf1421 = torch.ops.aten.max_pool3d_with_indices.default(buf1420, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1420
buf1422 = buf1421[0]
del buf1421
buf1425 = torch.ops.aten.max_pool3d_with_indices.default(buf1424, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1424
buf1426 = buf1425[0]
del buf1425
buf1428 = buf1418
del buf1418
triton_poi_fused_minimum_neg_3[grid(256)](buf1428, buf1426, buf1422,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1429 = torch.ops.aten.max_pool3d_with_indices.default(buf1428, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1430 = buf1429[0]
del buf1429
buf1433 = torch.ops.aten.max_pool3d_with_indices.default(buf1432, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1434 = buf1433[0]
del buf1433
buf1437 = torch.ops.aten.max_pool3d_with_indices.default(buf1436, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1438 = buf1437[0]
del buf1437
buf1441 = torch.ops.aten.max_pool3d_with_indices.default(buf1440, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1442 = buf1441[0]
del buf1441
buf1444 = buf1440
del buf1440
buf1448 = buf1436
del buf1436
buf1452 = buf1432
del buf1432
buf1460 = buf1428
del buf1428
buf1464 = buf1426
del buf1426
buf1468 = buf1422
del buf1422
triton_poi_fused_minimum_neg_2[grid(256)](buf1434, buf1442, buf1438,
buf1444, buf1448, buf1452, buf1460, buf1464, buf1468, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1445 = torch.ops.aten.max_pool3d_with_indices.default(buf1444, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1444
buf1446 = buf1445[0]
del buf1445
buf1449 = torch.ops.aten.max_pool3d_with_indices.default(buf1448, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1448
buf1450 = buf1449[0]
del buf1449
buf1453 = torch.ops.aten.max_pool3d_with_indices.default(buf1452, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1452
buf1454 = buf1453[0]
del buf1453
buf1456 = buf1446
del buf1446
triton_poi_fused_minimum_neg_3[grid(256)](buf1456, buf1454, buf1450,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1457 = torch.ops.aten.max_pool3d_with_indices.default(buf1456, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1458 = buf1457[0]
del buf1457
buf1461 = torch.ops.aten.max_pool3d_with_indices.default(buf1460, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1462 = buf1461[0]
del buf1461
buf1465 = torch.ops.aten.max_pool3d_with_indices.default(buf1464, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1466 = buf1465[0]
del buf1465
buf1469 = torch.ops.aten.max_pool3d_with_indices.default(buf1468, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1470 = buf1469[0]
del buf1469
buf1472 = buf1468
del buf1468
buf1476 = buf1464
del buf1464
buf1480 = buf1460
del buf1460
buf1488 = buf1456
del buf1456
buf1492 = buf1454
del buf1454
buf1496 = buf1450
del buf1450
triton_poi_fused_minimum_neg_2[grid(256)](buf1462, buf1470, buf1466,
buf1472, buf1476, buf1480, buf1488, buf1492, buf1496, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1473 = torch.ops.aten.max_pool3d_with_indices.default(buf1472, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1472
buf1474 = buf1473[0]
del buf1473
buf1477 = torch.ops.aten.max_pool3d_with_indices.default(buf1476, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1476
buf1478 = buf1477[0]
del buf1477
buf1481 = torch.ops.aten.max_pool3d_with_indices.default(buf1480, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1480
buf1482 = buf1481[0]
del buf1481
buf1484 = buf1474
del buf1474
triton_poi_fused_minimum_neg_3[grid(256)](buf1484, buf1482, buf1478,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1485 = torch.ops.aten.max_pool3d_with_indices.default(buf1484, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1486 = buf1485[0]
del buf1485
buf1489 = torch.ops.aten.max_pool3d_with_indices.default(buf1488, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1490 = buf1489[0]
del buf1489
buf1493 = torch.ops.aten.max_pool3d_with_indices.default(buf1492, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1494 = buf1493[0]
del buf1493
buf1497 = torch.ops.aten.max_pool3d_with_indices.default(buf1496, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1498 = buf1497[0]
del buf1497
buf1500 = buf1496
del buf1496
buf1504 = buf1492
del buf1492
buf1508 = buf1488
del buf1488
buf1516 = buf1484
del buf1484
buf1520 = buf1482
del buf1482
buf1524 = buf1478
del buf1478
triton_poi_fused_minimum_neg_2[grid(256)](buf1490, buf1498, buf1494,
buf1500, buf1504, buf1508, buf1516, buf1520, buf1524, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1501 = torch.ops.aten.max_pool3d_with_indices.default(buf1500, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1500
buf1502 = buf1501[0]
del buf1501
buf1505 = torch.ops.aten.max_pool3d_with_indices.default(buf1504, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1504
buf1506 = buf1505[0]
del buf1505
buf1509 = torch.ops.aten.max_pool3d_with_indices.default(buf1508, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1508
buf1510 = buf1509[0]
del buf1509
buf1512 = buf1502
del buf1502
triton_poi_fused_minimum_neg_3[grid(256)](buf1512, buf1510, buf1506,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1513 = torch.ops.aten.max_pool3d_with_indices.default(buf1512, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1514 = buf1513[0]
del buf1513
buf1517 = torch.ops.aten.max_pool3d_with_indices.default(buf1516, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1518 = buf1517[0]
del buf1517
buf1521 = torch.ops.aten.max_pool3d_with_indices.default(buf1520, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1522 = buf1521[0]
del buf1521
buf1525 = torch.ops.aten.max_pool3d_with_indices.default(buf1524, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1526 = buf1525[0]
del buf1525
buf1528 = buf1524
del buf1524
buf1532 = buf1520
del buf1520
buf1536 = buf1516
del buf1516
buf1544 = buf1512
del buf1512
buf1548 = buf1510
del buf1510
buf1552 = buf1506
del buf1506
triton_poi_fused_minimum_neg_2[grid(256)](buf1518, buf1526, buf1522,
buf1528, buf1532, buf1536, buf1544, buf1548, buf1552, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1529 = torch.ops.aten.max_pool3d_with_indices.default(buf1528, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1528
buf1530 = buf1529[0]
del buf1529
buf1533 = torch.ops.aten.max_pool3d_with_indices.default(buf1532, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1532
buf1534 = buf1533[0]
del buf1533
buf1537 = torch.ops.aten.max_pool3d_with_indices.default(buf1536, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1536
buf1538 = buf1537[0]
del buf1537
buf1540 = buf1530
del buf1530
triton_poi_fused_minimum_neg_3[grid(256)](buf1540, buf1538, buf1534,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1541 = torch.ops.aten.max_pool3d_with_indices.default(buf1540, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1542 = buf1541[0]
del buf1541
buf1545 = torch.ops.aten.max_pool3d_with_indices.default(buf1544, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1546 = buf1545[0]
del buf1545
buf1549 = torch.ops.aten.max_pool3d_with_indices.default(buf1548, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1550 = buf1549[0]
del buf1549
buf1553 = torch.ops.aten.max_pool3d_with_indices.default(buf1552, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1554 = buf1553[0]
del buf1553
buf1556 = buf1552
del buf1552
buf1560 = buf1548
del buf1548
buf1564 = buf1544
del buf1544
buf1572 = buf1540
del buf1540
buf1576 = buf1538
del buf1538
buf1580 = buf1534
del buf1534
triton_poi_fused_minimum_neg_2[grid(256)](buf1546, buf1554, buf1550,
buf1556, buf1560, buf1564, buf1572, buf1576, buf1580, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1557 = torch.ops.aten.max_pool3d_with_indices.default(buf1556, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1556
buf1558 = buf1557[0]
del buf1557
buf1561 = torch.ops.aten.max_pool3d_with_indices.default(buf1560, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1560
buf1562 = buf1561[0]
del buf1561
buf1565 = torch.ops.aten.max_pool3d_with_indices.default(buf1564, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1564
buf1566 = buf1565[0]
del buf1565
buf1568 = buf1558
del buf1558
triton_poi_fused_minimum_neg_3[grid(256)](buf1568, buf1566, buf1562,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1569 = torch.ops.aten.max_pool3d_with_indices.default(buf1568, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1570 = buf1569[0]
del buf1569
buf1573 = torch.ops.aten.max_pool3d_with_indices.default(buf1572, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1574 = buf1573[0]
del buf1573
buf1577 = torch.ops.aten.max_pool3d_with_indices.default(buf1576, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1578 = buf1577[0]
del buf1577
buf1581 = torch.ops.aten.max_pool3d_with_indices.default(buf1580, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1582 = buf1581[0]
del buf1581
buf1584 = buf1580
del buf1580
buf1588 = buf1576
del buf1576
buf1592 = buf1572
del buf1572
buf1600 = buf1568
del buf1568
buf1604 = buf1566
del buf1566
buf1608 = buf1562
del buf1562
triton_poi_fused_minimum_neg_2[grid(256)](buf1574, buf1582, buf1578,
buf1584, buf1588, buf1592, buf1600, buf1604, buf1608, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1585 = torch.ops.aten.max_pool3d_with_indices.default(buf1584, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1584
buf1586 = buf1585[0]
del buf1585
buf1589 = torch.ops.aten.max_pool3d_with_indices.default(buf1588, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1588
buf1590 = buf1589[0]
del buf1589
buf1593 = torch.ops.aten.max_pool3d_with_indices.default(buf1592, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1592
buf1594 = buf1593[0]
del buf1593
buf1596 = buf1586
del buf1586
triton_poi_fused_minimum_neg_3[grid(256)](buf1596, buf1594, buf1590,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1597 = torch.ops.aten.max_pool3d_with_indices.default(buf1596, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1598 = buf1597[0]
del buf1597
buf1601 = torch.ops.aten.max_pool3d_with_indices.default(buf1600, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1602 = buf1601[0]
del buf1601
buf1605 = torch.ops.aten.max_pool3d_with_indices.default(buf1604, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1606 = buf1605[0]
del buf1605
buf1609 = torch.ops.aten.max_pool3d_with_indices.default(buf1608, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1610 = buf1609[0]
del buf1609
buf1612 = buf1608
del buf1608
buf1616 = buf1604
del buf1604
buf1620 = buf1600
del buf1600
buf1628 = buf1596
del buf1596
buf1632 = buf1594
del buf1594
buf1636 = buf1590
del buf1590
triton_poi_fused_minimum_neg_2[grid(256)](buf1602, buf1610, buf1606,
buf1612, buf1616, buf1620, buf1628, buf1632, buf1636, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1613 = torch.ops.aten.max_pool3d_with_indices.default(buf1612, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1612
buf1614 = buf1613[0]
del buf1613
buf1617 = torch.ops.aten.max_pool3d_with_indices.default(buf1616, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1616
buf1618 = buf1617[0]
del buf1617
buf1621 = torch.ops.aten.max_pool3d_with_indices.default(buf1620, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1620
buf1622 = buf1621[0]
del buf1621
buf1624 = buf1614
del buf1614
triton_poi_fused_minimum_neg_3[grid(256)](buf1624, buf1622, buf1618,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1625 = torch.ops.aten.max_pool3d_with_indices.default(buf1624, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1626 = buf1625[0]
del buf1625
buf1629 = torch.ops.aten.max_pool3d_with_indices.default(buf1628, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1630 = buf1629[0]
del buf1629
buf1633 = torch.ops.aten.max_pool3d_with_indices.default(buf1632, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1634 = buf1633[0]
del buf1633
buf1637 = torch.ops.aten.max_pool3d_with_indices.default(buf1636, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1638 = buf1637[0]
del buf1637
buf1640 = buf1636
del buf1636
buf1644 = buf1632
del buf1632
buf1648 = buf1628
del buf1628
buf1656 = buf1624
del buf1624
buf1660 = buf1622
del buf1622
buf1664 = buf1618
del buf1618
triton_poi_fused_minimum_neg_2[grid(256)](buf1630, buf1638, buf1634,
buf1640, buf1644, buf1648, buf1656, buf1660, buf1664, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1641 = torch.ops.aten.max_pool3d_with_indices.default(buf1640, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1640
buf1642 = buf1641[0]
del buf1641
buf1645 = torch.ops.aten.max_pool3d_with_indices.default(buf1644, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1644
buf1646 = buf1645[0]
del buf1645
buf1649 = torch.ops.aten.max_pool3d_with_indices.default(buf1648, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1648
buf1650 = buf1649[0]
del buf1649
buf1652 = buf1642
del buf1642
triton_poi_fused_minimum_neg_3[grid(256)](buf1652, buf1650, buf1646,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1653 = torch.ops.aten.max_pool3d_with_indices.default(buf1652, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1654 = buf1653[0]
del buf1653
buf1657 = torch.ops.aten.max_pool3d_with_indices.default(buf1656, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1658 = buf1657[0]
del buf1657
buf1661 = torch.ops.aten.max_pool3d_with_indices.default(buf1660, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1662 = buf1661[0]
del buf1661
buf1665 = torch.ops.aten.max_pool3d_with_indices.default(buf1664, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1666 = buf1665[0]
del buf1665
buf1668 = buf1664
del buf1664
buf1672 = buf1660
del buf1660
buf1676 = buf1656
del buf1656
buf1684 = buf1652
del buf1652
buf1688 = buf1650
del buf1650
buf1692 = buf1646
del buf1646
triton_poi_fused_minimum_neg_2[grid(256)](buf1658, buf1666, buf1662,
buf1668, buf1672, buf1676, buf1684, buf1688, buf1692, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1669 = torch.ops.aten.max_pool3d_with_indices.default(buf1668, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1668
buf1670 = buf1669[0]
del buf1669
buf1673 = torch.ops.aten.max_pool3d_with_indices.default(buf1672, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1672
buf1674 = buf1673[0]
del buf1673
buf1677 = torch.ops.aten.max_pool3d_with_indices.default(buf1676, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1676
buf1678 = buf1677[0]
del buf1677
buf1680 = buf1670
del buf1670
triton_poi_fused_minimum_neg_3[grid(256)](buf1680, buf1678, buf1674,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1681 = torch.ops.aten.max_pool3d_with_indices.default(buf1680, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1682 = buf1681[0]
del buf1681
buf1685 = torch.ops.aten.max_pool3d_with_indices.default(buf1684, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1686 = buf1685[0]
del buf1685
buf1689 = torch.ops.aten.max_pool3d_with_indices.default(buf1688, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1690 = buf1689[0]
del buf1689
buf1693 = torch.ops.aten.max_pool3d_with_indices.default(buf1692, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1694 = buf1693[0]
del buf1693
buf1696 = buf1692
del buf1692
buf1700 = buf1688
del buf1688
buf1704 = buf1684
del buf1684
buf1712 = buf1680
del buf1680
buf1716 = buf1678
del buf1678
buf1720 = buf1674
del buf1674
triton_poi_fused_minimum_neg_2[grid(256)](buf1686, buf1694, buf1690,
buf1696, buf1700, buf1704, buf1712, buf1716, buf1720, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1697 = torch.ops.aten.max_pool3d_with_indices.default(buf1696, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1696
buf1698 = buf1697[0]
del buf1697
buf1701 = torch.ops.aten.max_pool3d_with_indices.default(buf1700, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1700
buf1702 = buf1701[0]
del buf1701
buf1705 = torch.ops.aten.max_pool3d_with_indices.default(buf1704, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1704
buf1706 = buf1705[0]
del buf1705
buf1708 = buf1698
del buf1698
triton_poi_fused_minimum_neg_3[grid(256)](buf1708, buf1706, buf1702,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1709 = torch.ops.aten.max_pool3d_with_indices.default(buf1708, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1710 = buf1709[0]
del buf1709
buf1713 = torch.ops.aten.max_pool3d_with_indices.default(buf1712, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1714 = buf1713[0]
del buf1713
buf1717 = torch.ops.aten.max_pool3d_with_indices.default(buf1716, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1718 = buf1717[0]
del buf1717
buf1721 = torch.ops.aten.max_pool3d_with_indices.default(buf1720, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1722 = buf1721[0]
del buf1721
buf1724 = buf1720
del buf1720
buf1728 = buf1716
del buf1716
buf1732 = buf1712
del buf1712
buf1740 = buf1708
del buf1708
buf1744 = buf1706
del buf1706
buf1748 = buf1702
del buf1702
triton_poi_fused_minimum_neg_2[grid(256)](buf1714, buf1722, buf1718,
buf1724, buf1728, buf1732, buf1740, buf1744, buf1748, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1725 = torch.ops.aten.max_pool3d_with_indices.default(buf1724, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1724
buf1726 = buf1725[0]
del buf1725
buf1729 = torch.ops.aten.max_pool3d_with_indices.default(buf1728, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1728
buf1730 = buf1729[0]
del buf1729
buf1733 = torch.ops.aten.max_pool3d_with_indices.default(buf1732, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1732
buf1734 = buf1733[0]
del buf1733
buf1736 = buf1726
del buf1726
triton_poi_fused_minimum_neg_3[grid(256)](buf1736, buf1734, buf1730,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1737 = torch.ops.aten.max_pool3d_with_indices.default(buf1736, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1738 = buf1737[0]
del buf1737
buf1741 = torch.ops.aten.max_pool3d_with_indices.default(buf1740, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1742 = buf1741[0]
del buf1741
buf1745 = torch.ops.aten.max_pool3d_with_indices.default(buf1744, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1746 = buf1745[0]
del buf1745
buf1749 = torch.ops.aten.max_pool3d_with_indices.default(buf1748, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1750 = buf1749[0]
del buf1749
buf1752 = buf1748
del buf1748
buf1756 = buf1744
del buf1744
buf1760 = buf1740
del buf1740
buf1768 = buf1736
del buf1736
buf1772 = buf1734
del buf1734
buf1776 = buf1730
del buf1730
triton_poi_fused_minimum_neg_2[grid(256)](buf1742, buf1750, buf1746,
buf1752, buf1756, buf1760, buf1768, buf1772, buf1776, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1753 = torch.ops.aten.max_pool3d_with_indices.default(buf1752, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1752
buf1754 = buf1753[0]
del buf1753
buf1757 = torch.ops.aten.max_pool3d_with_indices.default(buf1756, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1756
buf1758 = buf1757[0]
del buf1757
buf1761 = torch.ops.aten.max_pool3d_with_indices.default(buf1760, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1760
buf1762 = buf1761[0]
del buf1761
buf1764 = buf1754
del buf1754
triton_poi_fused_minimum_neg_3[grid(256)](buf1764, buf1762, buf1758,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1765 = torch.ops.aten.max_pool3d_with_indices.default(buf1764, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1766 = buf1765[0]
del buf1765
buf1769 = torch.ops.aten.max_pool3d_with_indices.default(buf1768, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1770 = buf1769[0]
del buf1769
buf1773 = torch.ops.aten.max_pool3d_with_indices.default(buf1772, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1774 = buf1773[0]
del buf1773
buf1777 = torch.ops.aten.max_pool3d_with_indices.default(buf1776, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1778 = buf1777[0]
del buf1777
buf1780 = buf1776
del buf1776
buf1784 = buf1772
del buf1772
buf1788 = buf1768
del buf1768
buf1796 = buf1764
del buf1764
buf1800 = buf1762
del buf1762
buf1804 = buf1758
del buf1758
triton_poi_fused_minimum_neg_2[grid(256)](buf1770, buf1778, buf1774,
buf1780, buf1784, buf1788, buf1796, buf1800, buf1804, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1781 = torch.ops.aten.max_pool3d_with_indices.default(buf1780, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1780
buf1782 = buf1781[0]
del buf1781
buf1785 = torch.ops.aten.max_pool3d_with_indices.default(buf1784, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1784
buf1786 = buf1785[0]
del buf1785
buf1789 = torch.ops.aten.max_pool3d_with_indices.default(buf1788, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1788
buf1790 = buf1789[0]
del buf1789
buf1792 = buf1782
del buf1782
triton_poi_fused_minimum_neg_3[grid(256)](buf1792, buf1790, buf1786,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1793 = torch.ops.aten.max_pool3d_with_indices.default(buf1792, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1794 = buf1793[0]
del buf1793
buf1797 = torch.ops.aten.max_pool3d_with_indices.default(buf1796, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1798 = buf1797[0]
del buf1797
buf1801 = torch.ops.aten.max_pool3d_with_indices.default(buf1800, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1802 = buf1801[0]
del buf1801
buf1805 = torch.ops.aten.max_pool3d_with_indices.default(buf1804, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1806 = buf1805[0]
del buf1805
buf1808 = buf1804
del buf1804
buf1812 = buf1800
del buf1800
buf1816 = buf1796
del buf1796
buf1824 = buf1792
del buf1792
buf1828 = buf1790
del buf1790
buf1832 = buf1786
del buf1786
triton_poi_fused_minimum_neg_2[grid(256)](buf1798, buf1806, buf1802,
buf1808, buf1812, buf1816, buf1824, buf1828, buf1832, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1809 = torch.ops.aten.max_pool3d_with_indices.default(buf1808, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1808
buf1810 = buf1809[0]
del buf1809
buf1813 = torch.ops.aten.max_pool3d_with_indices.default(buf1812, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1812
buf1814 = buf1813[0]
del buf1813
buf1817 = torch.ops.aten.max_pool3d_with_indices.default(buf1816, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1816
buf1818 = buf1817[0]
del buf1817
buf1820 = buf1810
del buf1810
triton_poi_fused_minimum_neg_3[grid(256)](buf1820, buf1818, buf1814,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1821 = torch.ops.aten.max_pool3d_with_indices.default(buf1820, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1822 = buf1821[0]
del buf1821
buf1825 = torch.ops.aten.max_pool3d_with_indices.default(buf1824, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1826 = buf1825[0]
del buf1825
buf1829 = torch.ops.aten.max_pool3d_with_indices.default(buf1828, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1830 = buf1829[0]
del buf1829
buf1833 = torch.ops.aten.max_pool3d_with_indices.default(buf1832, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1834 = buf1833[0]
del buf1833
buf1836 = buf1832
del buf1832
buf1840 = buf1828
del buf1828
buf1844 = buf1824
del buf1824
buf1852 = buf1820
del buf1820
buf1856 = buf1818
del buf1818
buf1860 = buf1814
del buf1814
triton_poi_fused_minimum_neg_2[grid(256)](buf1826, buf1834, buf1830,
buf1836, buf1840, buf1844, buf1852, buf1856, buf1860, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1837 = torch.ops.aten.max_pool3d_with_indices.default(buf1836, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1836
buf1838 = buf1837[0]
del buf1837
buf1841 = torch.ops.aten.max_pool3d_with_indices.default(buf1840, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1840
buf1842 = buf1841[0]
del buf1841
buf1845 = torch.ops.aten.max_pool3d_with_indices.default(buf1844, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1844
buf1846 = buf1845[0]
del buf1845
buf1848 = buf1838
del buf1838
triton_poi_fused_minimum_neg_3[grid(256)](buf1848, buf1846, buf1842,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1849 = torch.ops.aten.max_pool3d_with_indices.default(buf1848, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1850 = buf1849[0]
del buf1849
buf1853 = torch.ops.aten.max_pool3d_with_indices.default(buf1852, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1854 = buf1853[0]
del buf1853
buf1857 = torch.ops.aten.max_pool3d_with_indices.default(buf1856, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1858 = buf1857[0]
del buf1857
buf1861 = torch.ops.aten.max_pool3d_with_indices.default(buf1860, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1862 = buf1861[0]
del buf1861
buf1864 = buf1860
del buf1860
buf1868 = buf1856
del buf1856
buf1872 = buf1852
del buf1852
buf1880 = buf1848
del buf1848
buf1884 = buf1846
del buf1846
buf1888 = buf1842
del buf1842
triton_poi_fused_minimum_neg_2[grid(256)](buf1854, buf1862, buf1858,
buf1864, buf1868, buf1872, buf1880, buf1884, buf1888, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1865 = torch.ops.aten.max_pool3d_with_indices.default(buf1864, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1864
buf1866 = buf1865[0]
del buf1865
buf1869 = torch.ops.aten.max_pool3d_with_indices.default(buf1868, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1868
buf1870 = buf1869[0]
del buf1869
buf1873 = torch.ops.aten.max_pool3d_with_indices.default(buf1872, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1872
buf1874 = buf1873[0]
del buf1873
buf1876 = buf1866
del buf1866
triton_poi_fused_minimum_neg_3[grid(256)](buf1876, buf1874, buf1870,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1877 = torch.ops.aten.max_pool3d_with_indices.default(buf1876, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1878 = buf1877[0]
del buf1877
buf1881 = torch.ops.aten.max_pool3d_with_indices.default(buf1880, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1882 = buf1881[0]
del buf1881
buf1885 = torch.ops.aten.max_pool3d_with_indices.default(buf1884, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1886 = buf1885[0]
del buf1885
buf1889 = torch.ops.aten.max_pool3d_with_indices.default(buf1888, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1890 = buf1889[0]
del buf1889
buf1892 = buf1888
del buf1888
buf1896 = buf1884
del buf1884
buf1900 = buf1880
del buf1880
buf1908 = buf1876
del buf1876
buf1912 = buf1874
del buf1874
buf1916 = buf1870
del buf1870
triton_poi_fused_minimum_neg_2[grid(256)](buf1882, buf1890, buf1886,
buf1892, buf1896, buf1900, buf1908, buf1912, buf1916, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1893 = torch.ops.aten.max_pool3d_with_indices.default(buf1892, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1892
buf1894 = buf1893[0]
del buf1893
buf1897 = torch.ops.aten.max_pool3d_with_indices.default(buf1896, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1896
buf1898 = buf1897[0]
del buf1897
buf1901 = torch.ops.aten.max_pool3d_with_indices.default(buf1900, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1900
buf1902 = buf1901[0]
del buf1901
buf1904 = buf1894
del buf1894
triton_poi_fused_minimum_neg_3[grid(256)](buf1904, buf1902, buf1898,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1905 = torch.ops.aten.max_pool3d_with_indices.default(buf1904, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1906 = buf1905[0]
del buf1905
buf1909 = torch.ops.aten.max_pool3d_with_indices.default(buf1908, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1910 = buf1909[0]
del buf1909
buf1913 = torch.ops.aten.max_pool3d_with_indices.default(buf1912, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1914 = buf1913[0]
del buf1913
buf1917 = torch.ops.aten.max_pool3d_with_indices.default(buf1916, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1918 = buf1917[0]
del buf1917
buf1920 = buf1916
del buf1916
buf1924 = buf1912
del buf1912
buf1928 = buf1908
del buf1908
buf1936 = buf1904
del buf1904
buf1940 = buf1902
del buf1902
buf1944 = buf1898
del buf1898
triton_poi_fused_minimum_neg_2[grid(256)](buf1910, buf1918, buf1914,
buf1920, buf1924, buf1928, buf1936, buf1940, buf1944, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1921 = torch.ops.aten.max_pool3d_with_indices.default(buf1920, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1920
buf1922 = buf1921[0]
del buf1921
buf1925 = torch.ops.aten.max_pool3d_with_indices.default(buf1924, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1924
buf1926 = buf1925[0]
del buf1925
buf1929 = torch.ops.aten.max_pool3d_with_indices.default(buf1928, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1928
buf1930 = buf1929[0]
del buf1929
buf1932 = buf1922
del buf1922
triton_poi_fused_minimum_neg_3[grid(256)](buf1932, buf1930, buf1926,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1933 = torch.ops.aten.max_pool3d_with_indices.default(buf1932, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1934 = buf1933[0]
del buf1933
buf1937 = torch.ops.aten.max_pool3d_with_indices.default(buf1936, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1938 = buf1937[0]
del buf1937
buf1941 = torch.ops.aten.max_pool3d_with_indices.default(buf1940, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1942 = buf1941[0]
del buf1941
buf1945 = torch.ops.aten.max_pool3d_with_indices.default(buf1944, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1946 = buf1945[0]
del buf1945
buf1948 = buf1944
del buf1944
buf1952 = buf1940
del buf1940
buf1956 = buf1936
del buf1936
buf1964 = buf1932
del buf1932
buf1968 = buf1930
del buf1930
buf1972 = buf1926
del buf1926
triton_poi_fused_minimum_neg_2[grid(256)](buf1938, buf1946, buf1942,
buf1948, buf1952, buf1956, buf1964, buf1968, buf1972, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1949 = torch.ops.aten.max_pool3d_with_indices.default(buf1948, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1948
buf1950 = buf1949[0]
del buf1949
buf1953 = torch.ops.aten.max_pool3d_with_indices.default(buf1952, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1952
buf1954 = buf1953[0]
del buf1953
buf1957 = torch.ops.aten.max_pool3d_with_indices.default(buf1956, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1956
buf1958 = buf1957[0]
del buf1957
buf1960 = buf1950
del buf1950
triton_poi_fused_minimum_neg_3[grid(256)](buf1960, buf1958, buf1954,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1961 = torch.ops.aten.max_pool3d_with_indices.default(buf1960, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1962 = buf1961[0]
del buf1961
buf1965 = torch.ops.aten.max_pool3d_with_indices.default(buf1964, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1966 = buf1965[0]
del buf1965
buf1969 = torch.ops.aten.max_pool3d_with_indices.default(buf1968, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1970 = buf1969[0]
del buf1969
buf1973 = torch.ops.aten.max_pool3d_with_indices.default(buf1972, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1974 = buf1973[0]
del buf1973
buf1976 = buf1972
del buf1972
buf1980 = buf1968
del buf1968
buf1984 = buf1964
del buf1964
buf1992 = buf1960
del buf1960
buf1996 = buf1958
del buf1958
buf2000 = buf1954
del buf1954
triton_poi_fused_minimum_neg_2[grid(256)](buf1966, buf1974, buf1970,
buf1976, buf1980, buf1984, buf1992, buf1996, buf2000, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1977 = torch.ops.aten.max_pool3d_with_indices.default(buf1976, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1976
buf1978 = buf1977[0]
del buf1977
buf1981 = torch.ops.aten.max_pool3d_with_indices.default(buf1980, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1980
buf1982 = buf1981[0]
del buf1981
buf1985 = torch.ops.aten.max_pool3d_with_indices.default(buf1984, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1984
buf1986 = buf1985[0]
del buf1985
buf1988 = buf1978
del buf1978
triton_poi_fused_minimum_neg_3[grid(256)](buf1988, buf1986, buf1982,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1989 = torch.ops.aten.max_pool3d_with_indices.default(buf1988, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1990 = buf1989[0]
del buf1989
buf1993 = torch.ops.aten.max_pool3d_with_indices.default(buf1992, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1994 = buf1993[0]
del buf1993
buf1997 = torch.ops.aten.max_pool3d_with_indices.default(buf1996, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1998 = buf1997[0]
del buf1997
buf2001 = torch.ops.aten.max_pool3d_with_indices.default(buf2000, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2002 = buf2001[0]
del buf2001
buf2004 = buf2000
del buf2000
buf2008 = buf1996
del buf1996
buf2012 = buf1992
del buf1992
buf2020 = buf1988
del buf1988
buf2024 = buf1986
del buf1986
buf2028 = buf1982
del buf1982
triton_poi_fused_minimum_neg_2[grid(256)](buf1994, buf2002, buf1998,
buf2004, buf2008, buf2012, buf2020, buf2024, buf2028, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2005 = torch.ops.aten.max_pool3d_with_indices.default(buf2004, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2004
buf2006 = buf2005[0]
del buf2005
buf2009 = torch.ops.aten.max_pool3d_with_indices.default(buf2008, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2008
buf2010 = buf2009[0]
del buf2009
buf2013 = torch.ops.aten.max_pool3d_with_indices.default(buf2012, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2012
buf2014 = buf2013[0]
del buf2013
buf2016 = buf2006
del buf2006
triton_poi_fused_minimum_neg_3[grid(256)](buf2016, buf2014, buf2010,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2017 = torch.ops.aten.max_pool3d_with_indices.default(buf2016, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2018 = buf2017[0]
del buf2017
buf2021 = torch.ops.aten.max_pool3d_with_indices.default(buf2020, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2022 = buf2021[0]
del buf2021
buf2025 = torch.ops.aten.max_pool3d_with_indices.default(buf2024, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2026 = buf2025[0]
del buf2025
buf2029 = torch.ops.aten.max_pool3d_with_indices.default(buf2028, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2030 = buf2029[0]
del buf2029
buf2032 = buf2028
del buf2028
buf2036 = buf2024
del buf2024
buf2040 = buf2020
del buf2020
buf2048 = buf2016
del buf2016
buf2052 = buf2014
del buf2014
buf2056 = buf2010
del buf2010
triton_poi_fused_minimum_neg_2[grid(256)](buf2022, buf2030, buf2026,
buf2032, buf2036, buf2040, buf2048, buf2052, buf2056, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2033 = torch.ops.aten.max_pool3d_with_indices.default(buf2032, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2032
buf2034 = buf2033[0]
del buf2033
buf2037 = torch.ops.aten.max_pool3d_with_indices.default(buf2036, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2036
buf2038 = buf2037[0]
del buf2037
buf2041 = torch.ops.aten.max_pool3d_with_indices.default(buf2040, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2040
buf2042 = buf2041[0]
del buf2041
buf2044 = buf2034
del buf2034
triton_poi_fused_minimum_neg_3[grid(256)](buf2044, buf2042, buf2038,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2045 = torch.ops.aten.max_pool3d_with_indices.default(buf2044, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2046 = buf2045[0]
del buf2045
buf2049 = torch.ops.aten.max_pool3d_with_indices.default(buf2048, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2050 = buf2049[0]
del buf2049
buf2053 = torch.ops.aten.max_pool3d_with_indices.default(buf2052, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2054 = buf2053[0]
del buf2053
buf2057 = torch.ops.aten.max_pool3d_with_indices.default(buf2056, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2058 = buf2057[0]
del buf2057
buf2060 = buf2056
del buf2056
buf2064 = buf2052
del buf2052
buf2068 = buf2048
del buf2048
buf2076 = buf2044
del buf2044
buf2080 = buf2042
del buf2042
buf2084 = buf2038
del buf2038
triton_poi_fused_minimum_neg_2[grid(256)](buf2050, buf2058, buf2054,
buf2060, buf2064, buf2068, buf2076, buf2080, buf2084, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2061 = torch.ops.aten.max_pool3d_with_indices.default(buf2060, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2060
buf2062 = buf2061[0]
del buf2061
buf2065 = torch.ops.aten.max_pool3d_with_indices.default(buf2064, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2064
buf2066 = buf2065[0]
del buf2065
buf2069 = torch.ops.aten.max_pool3d_with_indices.default(buf2068, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2068
buf2070 = buf2069[0]
del buf2069
buf2072 = buf2062
del buf2062
triton_poi_fused_minimum_neg_3[grid(256)](buf2072, buf2070, buf2066,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2073 = torch.ops.aten.max_pool3d_with_indices.default(buf2072, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2074 = buf2073[0]
del buf2073
buf2077 = torch.ops.aten.max_pool3d_with_indices.default(buf2076, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2078 = buf2077[0]
del buf2077
buf2081 = torch.ops.aten.max_pool3d_with_indices.default(buf2080, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2082 = buf2081[0]
del buf2081
buf2085 = torch.ops.aten.max_pool3d_with_indices.default(buf2084, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2086 = buf2085[0]
del buf2085
buf2088 = buf2084
del buf2084
buf2092 = buf2080
del buf2080
buf2096 = buf2076
del buf2076
buf2104 = buf2072
del buf2072
buf2108 = buf2070
del buf2070
buf2112 = buf2066
del buf2066
triton_poi_fused_minimum_neg_2[grid(256)](buf2078, buf2086, buf2082,
buf2088, buf2092, buf2096, buf2104, buf2108, buf2112, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2089 = torch.ops.aten.max_pool3d_with_indices.default(buf2088, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2088
buf2090 = buf2089[0]
del buf2089
buf2093 = torch.ops.aten.max_pool3d_with_indices.default(buf2092, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2092
buf2094 = buf2093[0]
del buf2093
buf2097 = torch.ops.aten.max_pool3d_with_indices.default(buf2096, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2096
buf2098 = buf2097[0]
del buf2097
buf2100 = buf2090
del buf2090
triton_poi_fused_minimum_neg_3[grid(256)](buf2100, buf2098, buf2094,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2101 = torch.ops.aten.max_pool3d_with_indices.default(buf2100, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2102 = buf2101[0]
del buf2101
buf2105 = torch.ops.aten.max_pool3d_with_indices.default(buf2104, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2106 = buf2105[0]
del buf2105
buf2109 = torch.ops.aten.max_pool3d_with_indices.default(buf2108, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2110 = buf2109[0]
del buf2109
buf2113 = torch.ops.aten.max_pool3d_with_indices.default(buf2112, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2114 = buf2113[0]
del buf2113
buf2116 = buf2112
del buf2112
buf2120 = buf2108
del buf2108
buf2124 = buf2104
del buf2104
buf2132 = buf2100
del buf2100
buf2136 = buf2098
del buf2098
buf2140 = buf2094
del buf2094
triton_poi_fused_minimum_neg_2[grid(256)](buf2106, buf2114, buf2110,
buf2116, buf2120, buf2124, buf2132, buf2136, buf2140, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2117 = torch.ops.aten.max_pool3d_with_indices.default(buf2116, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2116
buf2118 = buf2117[0]
del buf2117
buf2121 = torch.ops.aten.max_pool3d_with_indices.default(buf2120, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2120
buf2122 = buf2121[0]
del buf2121
buf2125 = torch.ops.aten.max_pool3d_with_indices.default(buf2124, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2124
buf2126 = buf2125[0]
del buf2125
buf2128 = buf2118
del buf2118
triton_poi_fused_minimum_neg_3[grid(256)](buf2128, buf2126, buf2122,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2129 = torch.ops.aten.max_pool3d_with_indices.default(buf2128, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2130 = buf2129[0]
del buf2129
buf2133 = torch.ops.aten.max_pool3d_with_indices.default(buf2132, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2134 = buf2133[0]
del buf2133
buf2137 = torch.ops.aten.max_pool3d_with_indices.default(buf2136, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2138 = buf2137[0]
del buf2137
buf2141 = torch.ops.aten.max_pool3d_with_indices.default(buf2140, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2142 = buf2141[0]
del buf2141
buf2144 = buf2140
del buf2140
buf2148 = buf2136
del buf2136
buf2152 = buf2132
del buf2132
buf2160 = buf2128
del buf2128
buf2164 = buf2126
del buf2126
buf2168 = buf2122
del buf2122
triton_poi_fused_minimum_neg_2[grid(256)](buf2134, buf2142, buf2138,
buf2144, buf2148, buf2152, buf2160, buf2164, buf2168, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2145 = torch.ops.aten.max_pool3d_with_indices.default(buf2144, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2144
buf2146 = buf2145[0]
del buf2145
buf2149 = torch.ops.aten.max_pool3d_with_indices.default(buf2148, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2148
buf2150 = buf2149[0]
del buf2149
buf2153 = torch.ops.aten.max_pool3d_with_indices.default(buf2152, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2152
buf2154 = buf2153[0]
del buf2153
buf2156 = buf2146
del buf2146
triton_poi_fused_minimum_neg_3[grid(256)](buf2156, buf2154, buf2150,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2157 = torch.ops.aten.max_pool3d_with_indices.default(buf2156, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2158 = buf2157[0]
del buf2157
buf2161 = torch.ops.aten.max_pool3d_with_indices.default(buf2160, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2162 = buf2161[0]
del buf2161
buf2165 = torch.ops.aten.max_pool3d_with_indices.default(buf2164, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2166 = buf2165[0]
del buf2165
buf2169 = torch.ops.aten.max_pool3d_with_indices.default(buf2168, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2170 = buf2169[0]
del buf2169
buf2172 = buf2168
del buf2168
buf2176 = buf2164
del buf2164
buf2180 = buf2160
del buf2160
buf2188 = buf2156
del buf2156
buf2192 = buf2154
del buf2154
buf2196 = buf2150
del buf2150
triton_poi_fused_minimum_neg_2[grid(256)](buf2162, buf2170, buf2166,
buf2172, buf2176, buf2180, buf2188, buf2192, buf2196, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2173 = torch.ops.aten.max_pool3d_with_indices.default(buf2172, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2172
buf2174 = buf2173[0]
del buf2173
buf2177 = torch.ops.aten.max_pool3d_with_indices.default(buf2176, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2176
buf2178 = buf2177[0]
del buf2177
buf2181 = torch.ops.aten.max_pool3d_with_indices.default(buf2180, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2180
buf2182 = buf2181[0]
del buf2181
buf2184 = buf2174
del buf2174
triton_poi_fused_minimum_neg_3[grid(256)](buf2184, buf2182, buf2178,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2185 = torch.ops.aten.max_pool3d_with_indices.default(buf2184, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2186 = buf2185[0]
del buf2185
buf2189 = torch.ops.aten.max_pool3d_with_indices.default(buf2188, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2190 = buf2189[0]
del buf2189
buf2193 = torch.ops.aten.max_pool3d_with_indices.default(buf2192, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2194 = buf2193[0]
del buf2193
buf2197 = torch.ops.aten.max_pool3d_with_indices.default(buf2196, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2198 = buf2197[0]
del buf2197
buf2200 = buf2196
del buf2196
buf2204 = buf2192
del buf2192
buf2208 = buf2188
del buf2188
buf2216 = buf2184
del buf2184
buf2220 = buf2182
del buf2182
buf2224 = buf2178
del buf2178
triton_poi_fused_minimum_neg_2[grid(256)](buf2190, buf2198, buf2194,
buf2200, buf2204, buf2208, buf2216, buf2220, buf2224, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2201 = torch.ops.aten.max_pool3d_with_indices.default(buf2200, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2200
buf2202 = buf2201[0]
del buf2201
buf2205 = torch.ops.aten.max_pool3d_with_indices.default(buf2204, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2204
buf2206 = buf2205[0]
del buf2205
buf2209 = torch.ops.aten.max_pool3d_with_indices.default(buf2208, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2208
buf2210 = buf2209[0]
del buf2209
buf2212 = buf2202
del buf2202
triton_poi_fused_minimum_neg_3[grid(256)](buf2212, buf2210, buf2206,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2213 = torch.ops.aten.max_pool3d_with_indices.default(buf2212, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2214 = buf2213[0]
del buf2213
buf2217 = torch.ops.aten.max_pool3d_with_indices.default(buf2216, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2218 = buf2217[0]
del buf2217
buf2221 = torch.ops.aten.max_pool3d_with_indices.default(buf2220, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2222 = buf2221[0]
del buf2221
buf2225 = torch.ops.aten.max_pool3d_with_indices.default(buf2224, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2226 = buf2225[0]
del buf2225
buf2228 = buf2224
del buf2224
buf2232 = buf2220
del buf2220
buf2236 = buf2216
del buf2216
buf2244 = buf2212
del buf2212
buf2248 = buf2210
del buf2210
buf2252 = buf2206
del buf2206
triton_poi_fused_minimum_neg_2[grid(256)](buf2218, buf2226, buf2222,
buf2228, buf2232, buf2236, buf2244, buf2248, buf2252, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2229 = torch.ops.aten.max_pool3d_with_indices.default(buf2228, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2228
buf2230 = buf2229[0]
del buf2229
buf2233 = torch.ops.aten.max_pool3d_with_indices.default(buf2232, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2232
buf2234 = buf2233[0]
del buf2233
buf2237 = torch.ops.aten.max_pool3d_with_indices.default(buf2236, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2236
buf2238 = buf2237[0]
del buf2237
buf2240 = buf2230
del buf2230
triton_poi_fused_minimum_neg_3[grid(256)](buf2240, buf2238, buf2234,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2241 = torch.ops.aten.max_pool3d_with_indices.default(buf2240, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2242 = buf2241[0]
del buf2241
buf2245 = torch.ops.aten.max_pool3d_with_indices.default(buf2244, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2246 = buf2245[0]
del buf2245
buf2249 = torch.ops.aten.max_pool3d_with_indices.default(buf2248, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2250 = buf2249[0]
del buf2249
buf2253 = torch.ops.aten.max_pool3d_with_indices.default(buf2252, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2254 = buf2253[0]
del buf2253
buf2256 = buf2252
del buf2252
buf2260 = buf2248
del buf2248
buf2264 = buf2244
del buf2244
buf2272 = buf2240
del buf2240
buf2276 = buf2238
del buf2238
buf2280 = buf2234
del buf2234
triton_poi_fused_minimum_neg_2[grid(256)](buf2246, buf2254, buf2250,
buf2256, buf2260, buf2264, buf2272, buf2276, buf2280, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2257 = torch.ops.aten.max_pool3d_with_indices.default(buf2256, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2256
buf2258 = buf2257[0]
del buf2257
buf2261 = torch.ops.aten.max_pool3d_with_indices.default(buf2260, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2260
buf2262 = buf2261[0]
del buf2261
buf2265 = torch.ops.aten.max_pool3d_with_indices.default(buf2264, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2264
buf2266 = buf2265[0]
del buf2265
buf2268 = buf2258
del buf2258
triton_poi_fused_minimum_neg_3[grid(256)](buf2268, buf2266, buf2262,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2269 = torch.ops.aten.max_pool3d_with_indices.default(buf2268, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2270 = buf2269[0]
del buf2269
buf2273 = torch.ops.aten.max_pool3d_with_indices.default(buf2272, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2274 = buf2273[0]
del buf2273
buf2277 = torch.ops.aten.max_pool3d_with_indices.default(buf2276, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2278 = buf2277[0]
del buf2277
buf2281 = torch.ops.aten.max_pool3d_with_indices.default(buf2280, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2282 = buf2281[0]
del buf2281
buf2284 = buf2280
del buf2280
buf2288 = buf2276
del buf2276
buf2292 = buf2272
del buf2272
buf2300 = buf2268
del buf2268
buf2304 = buf2266
del buf2266
buf2308 = buf2262
del buf2262
triton_poi_fused_minimum_neg_2[grid(256)](buf2274, buf2282, buf2278,
buf2284, buf2288, buf2292, buf2300, buf2304, buf2308, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2285 = torch.ops.aten.max_pool3d_with_indices.default(buf2284, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2284
buf2286 = buf2285[0]
del buf2285
buf2289 = torch.ops.aten.max_pool3d_with_indices.default(buf2288, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2288
buf2290 = buf2289[0]
del buf2289
buf2293 = torch.ops.aten.max_pool3d_with_indices.default(buf2292, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2292
buf2294 = buf2293[0]
del buf2293
buf2296 = buf2286
del buf2286
triton_poi_fused_minimum_neg_3[grid(256)](buf2296, buf2294, buf2290,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2297 = torch.ops.aten.max_pool3d_with_indices.default(buf2296, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2298 = buf2297[0]
del buf2297
buf2301 = torch.ops.aten.max_pool3d_with_indices.default(buf2300, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2302 = buf2301[0]
del buf2301
buf2305 = torch.ops.aten.max_pool3d_with_indices.default(buf2304, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2306 = buf2305[0]
del buf2305
buf2309 = torch.ops.aten.max_pool3d_with_indices.default(buf2308, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2310 = buf2309[0]
del buf2309
buf2312 = buf2308
del buf2308
buf2316 = buf2304
del buf2304
buf2320 = buf2300
del buf2300
buf2328 = buf2296
del buf2296
buf2332 = buf2294
del buf2294
buf2336 = buf2290
del buf2290
triton_poi_fused_minimum_neg_2[grid(256)](buf2302, buf2310, buf2306,
buf2312, buf2316, buf2320, buf2328, buf2332, buf2336, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2313 = torch.ops.aten.max_pool3d_with_indices.default(buf2312, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2312
buf2314 = buf2313[0]
del buf2313
buf2317 = torch.ops.aten.max_pool3d_with_indices.default(buf2316, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2316
buf2318 = buf2317[0]
del buf2317
buf2321 = torch.ops.aten.max_pool3d_with_indices.default(buf2320, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2320
buf2322 = buf2321[0]
del buf2321
buf2324 = buf2314
del buf2314
triton_poi_fused_minimum_neg_3[grid(256)](buf2324, buf2322, buf2318,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2325 = torch.ops.aten.max_pool3d_with_indices.default(buf2324, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2326 = buf2325[0]
del buf2325
buf2329 = torch.ops.aten.max_pool3d_with_indices.default(buf2328, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2330 = buf2329[0]
del buf2329
buf2333 = torch.ops.aten.max_pool3d_with_indices.default(buf2332, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2334 = buf2333[0]
del buf2333
buf2337 = torch.ops.aten.max_pool3d_with_indices.default(buf2336, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2338 = buf2337[0]
del buf2337
buf2340 = buf2336
del buf2336
buf2344 = buf2332
del buf2332
buf2348 = buf2328
del buf2328
buf2356 = buf2324
del buf2324
buf2360 = buf2322
del buf2322
buf2364 = buf2318
del buf2318
triton_poi_fused_minimum_neg_2[grid(256)](buf2330, buf2338, buf2334,
buf2340, buf2344, buf2348, buf2356, buf2360, buf2364, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2341 = torch.ops.aten.max_pool3d_with_indices.default(buf2340, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2340
buf2342 = buf2341[0]
del buf2341
buf2345 = torch.ops.aten.max_pool3d_with_indices.default(buf2344, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2344
buf2346 = buf2345[0]
del buf2345
buf2349 = torch.ops.aten.max_pool3d_with_indices.default(buf2348, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2348
buf2350 = buf2349[0]
del buf2349
buf2352 = buf2342
del buf2342
triton_poi_fused_minimum_neg_3[grid(256)](buf2352, buf2350, buf2346,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2353 = torch.ops.aten.max_pool3d_with_indices.default(buf2352, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2354 = buf2353[0]
del buf2353
buf2357 = torch.ops.aten.max_pool3d_with_indices.default(buf2356, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2358 = buf2357[0]
del buf2357
buf2361 = torch.ops.aten.max_pool3d_with_indices.default(buf2360, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2362 = buf2361[0]
del buf2361
buf2365 = torch.ops.aten.max_pool3d_with_indices.default(buf2364, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2366 = buf2365[0]
del buf2365
buf2368 = buf2364
del buf2364
buf2372 = buf2360
del buf2360
buf2376 = buf2356
del buf2356
buf2384 = buf2352
del buf2352
buf2388 = buf2350
del buf2350
buf2392 = buf2346
del buf2346
triton_poi_fused_minimum_neg_2[grid(256)](buf2358, buf2366, buf2362,
buf2368, buf2372, buf2376, buf2384, buf2388, buf2392, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2369 = torch.ops.aten.max_pool3d_with_indices.default(buf2368, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2368
buf2370 = buf2369[0]
del buf2369
buf2373 = torch.ops.aten.max_pool3d_with_indices.default(buf2372, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2372
buf2374 = buf2373[0]
del buf2373
buf2377 = torch.ops.aten.max_pool3d_with_indices.default(buf2376, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2376
buf2378 = buf2377[0]
del buf2377
buf2380 = buf2370
del buf2370
triton_poi_fused_minimum_neg_3[grid(256)](buf2380, buf2378, buf2374,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2381 = torch.ops.aten.max_pool3d_with_indices.default(buf2380, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2382 = buf2381[0]
del buf2381
buf2385 = torch.ops.aten.max_pool3d_with_indices.default(buf2384, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2386 = buf2385[0]
del buf2385
buf2389 = torch.ops.aten.max_pool3d_with_indices.default(buf2388, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2390 = buf2389[0]
del buf2389
buf2393 = torch.ops.aten.max_pool3d_with_indices.default(buf2392, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2394 = buf2393[0]
del buf2393
buf2396 = buf2392
del buf2392
buf2400 = buf2388
del buf2388
buf2404 = buf2384
del buf2384
buf2412 = buf2380
del buf2380
buf2416 = buf2378
del buf2378
buf2420 = buf2374
del buf2374
triton_poi_fused_minimum_neg_2[grid(256)](buf2386, buf2394, buf2390,
buf2396, buf2400, buf2404, buf2412, buf2416, buf2420, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2397 = torch.ops.aten.max_pool3d_with_indices.default(buf2396, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2396
buf2398 = buf2397[0]
del buf2397
buf2401 = torch.ops.aten.max_pool3d_with_indices.default(buf2400, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2400
buf2402 = buf2401[0]
del buf2401
buf2405 = torch.ops.aten.max_pool3d_with_indices.default(buf2404, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2404
buf2406 = buf2405[0]
del buf2405
buf2408 = buf2398
del buf2398
triton_poi_fused_minimum_neg_3[grid(256)](buf2408, buf2406, buf2402,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2409 = torch.ops.aten.max_pool3d_with_indices.default(buf2408, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2410 = buf2409[0]
del buf2409
buf2413 = torch.ops.aten.max_pool3d_with_indices.default(buf2412, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2414 = buf2413[0]
del buf2413
buf2417 = torch.ops.aten.max_pool3d_with_indices.default(buf2416, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2418 = buf2417[0]
del buf2417
buf2421 = torch.ops.aten.max_pool3d_with_indices.default(buf2420, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2422 = buf2421[0]
del buf2421
buf2424 = buf2420
del buf2420
buf2428 = buf2416
del buf2416
buf2432 = buf2412
del buf2412
buf2440 = buf2408
del buf2408
buf2444 = buf2406
del buf2406
buf2448 = buf2402
del buf2402
triton_poi_fused_minimum_neg_2[grid(256)](buf2414, buf2422, buf2418,
buf2424, buf2428, buf2432, buf2440, buf2444, buf2448, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2425 = torch.ops.aten.max_pool3d_with_indices.default(buf2424, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2424
buf2426 = buf2425[0]
del buf2425
buf2429 = torch.ops.aten.max_pool3d_with_indices.default(buf2428, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2428
buf2430 = buf2429[0]
del buf2429
buf2433 = torch.ops.aten.max_pool3d_with_indices.default(buf2432, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2432
buf2434 = buf2433[0]
del buf2433
buf2436 = buf2426
del buf2426
triton_poi_fused_minimum_neg_3[grid(256)](buf2436, buf2434, buf2430,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2437 = torch.ops.aten.max_pool3d_with_indices.default(buf2436, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2438 = buf2437[0]
del buf2437
buf2441 = torch.ops.aten.max_pool3d_with_indices.default(buf2440, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2442 = buf2441[0]
del buf2441
buf2445 = torch.ops.aten.max_pool3d_with_indices.default(buf2444, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2446 = buf2445[0]
del buf2445
buf2449 = torch.ops.aten.max_pool3d_with_indices.default(buf2448, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2450 = buf2449[0]
del buf2449
buf2452 = buf2448
del buf2448
buf2456 = buf2444
del buf2444
buf2460 = buf2440
del buf2440
buf2468 = buf2436
del buf2436
buf2472 = buf2434
del buf2434
buf2476 = buf2430
del buf2430
triton_poi_fused_minimum_neg_2[grid(256)](buf2442, buf2450, buf2446,
buf2452, buf2456, buf2460, buf2468, buf2472, buf2476, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2453 = torch.ops.aten.max_pool3d_with_indices.default(buf2452, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2452
buf2454 = buf2453[0]
del buf2453
buf2457 = torch.ops.aten.max_pool3d_with_indices.default(buf2456, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2456
buf2458 = buf2457[0]
del buf2457
buf2461 = torch.ops.aten.max_pool3d_with_indices.default(buf2460, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2460
buf2462 = buf2461[0]
del buf2461
buf2464 = buf2454
del buf2454
triton_poi_fused_minimum_neg_3[grid(256)](buf2464, buf2462, buf2458,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2465 = torch.ops.aten.max_pool3d_with_indices.default(buf2464, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2466 = buf2465[0]
del buf2465
buf2469 = torch.ops.aten.max_pool3d_with_indices.default(buf2468, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2470 = buf2469[0]
del buf2469
buf2473 = torch.ops.aten.max_pool3d_with_indices.default(buf2472, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2474 = buf2473[0]
del buf2473
buf2477 = torch.ops.aten.max_pool3d_with_indices.default(buf2476, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2478 = buf2477[0]
del buf2477
buf2480 = buf2476
del buf2476
buf2484 = buf2472
del buf2472
buf2488 = buf2468
del buf2468
buf2496 = buf2464
del buf2464
buf2500 = buf2462
del buf2462
buf2504 = buf2458
del buf2458
triton_poi_fused_minimum_neg_2[grid(256)](buf2470, buf2478, buf2474,
buf2480, buf2484, buf2488, buf2496, buf2500, buf2504, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2481 = torch.ops.aten.max_pool3d_with_indices.default(buf2480, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2480
buf2482 = buf2481[0]
del buf2481
buf2485 = torch.ops.aten.max_pool3d_with_indices.default(buf2484, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2484
buf2486 = buf2485[0]
del buf2485
buf2489 = torch.ops.aten.max_pool3d_with_indices.default(buf2488, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2488
buf2490 = buf2489[0]
del buf2489
buf2492 = buf2482
del buf2482
triton_poi_fused_minimum_neg_3[grid(256)](buf2492, buf2490, buf2486,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2493 = torch.ops.aten.max_pool3d_with_indices.default(buf2492, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2494 = buf2493[0]
del buf2493
buf2497 = torch.ops.aten.max_pool3d_with_indices.default(buf2496, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2498 = buf2497[0]
del buf2497
buf2501 = torch.ops.aten.max_pool3d_with_indices.default(buf2500, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2502 = buf2501[0]
del buf2501
buf2505 = torch.ops.aten.max_pool3d_with_indices.default(buf2504, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2506 = buf2505[0]
del buf2505
buf2508 = buf2504
del buf2504
buf2512 = buf2500
del buf2500
buf2516 = buf2496
del buf2496
buf2524 = buf2492
del buf2492
buf2528 = buf2490
del buf2490
buf2532 = buf2486
del buf2486
triton_poi_fused_minimum_neg_2[grid(256)](buf2498, buf2506, buf2502,
buf2508, buf2512, buf2516, buf2524, buf2528, buf2532, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2509 = torch.ops.aten.max_pool3d_with_indices.default(buf2508, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2508
buf2510 = buf2509[0]
del buf2509
buf2513 = torch.ops.aten.max_pool3d_with_indices.default(buf2512, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2512
buf2514 = buf2513[0]
del buf2513
buf2517 = torch.ops.aten.max_pool3d_with_indices.default(buf2516, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2516
buf2518 = buf2517[0]
del buf2517
buf2520 = buf2510
del buf2510
triton_poi_fused_minimum_neg_3[grid(256)](buf2520, buf2518, buf2514,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2521 = torch.ops.aten.max_pool3d_with_indices.default(buf2520, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2522 = buf2521[0]
del buf2521
buf2525 = torch.ops.aten.max_pool3d_with_indices.default(buf2524, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2526 = buf2525[0]
del buf2525
buf2529 = torch.ops.aten.max_pool3d_with_indices.default(buf2528, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2530 = buf2529[0]
del buf2529
buf2533 = torch.ops.aten.max_pool3d_with_indices.default(buf2532, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2534 = buf2533[0]
del buf2533
buf2536 = buf2532
del buf2532
buf2540 = buf2528
del buf2528
buf2544 = buf2524
del buf2524
buf2552 = buf2520
del buf2520
buf2556 = buf2518
del buf2518
buf2560 = buf2514
del buf2514
triton_poi_fused_minimum_neg_2[grid(256)](buf2526, buf2534, buf2530,
buf2536, buf2540, buf2544, buf2552, buf2556, buf2560, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2537 = torch.ops.aten.max_pool3d_with_indices.default(buf2536, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2536
buf2538 = buf2537[0]
del buf2537
buf2541 = torch.ops.aten.max_pool3d_with_indices.default(buf2540, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2540
buf2542 = buf2541[0]
del buf2541
buf2545 = torch.ops.aten.max_pool3d_with_indices.default(buf2544, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2544
buf2546 = buf2545[0]
del buf2545
buf2548 = buf2538
del buf2538
triton_poi_fused_minimum_neg_3[grid(256)](buf2548, buf2546, buf2542,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2549 = torch.ops.aten.max_pool3d_with_indices.default(buf2548, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2550 = buf2549[0]
del buf2549
buf2553 = torch.ops.aten.max_pool3d_with_indices.default(buf2552, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2554 = buf2553[0]
del buf2553
buf2557 = torch.ops.aten.max_pool3d_with_indices.default(buf2556, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2558 = buf2557[0]
del buf2557
buf2561 = torch.ops.aten.max_pool3d_with_indices.default(buf2560, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2562 = buf2561[0]
del buf2561
buf2564 = buf2560
del buf2560
buf2568 = buf2556
del buf2556
buf2572 = buf2552
del buf2552
buf2580 = buf2548
del buf2548
buf2584 = buf2546
del buf2546
buf2588 = buf2542
del buf2542
triton_poi_fused_minimum_neg_2[grid(256)](buf2554, buf2562, buf2558,
buf2564, buf2568, buf2572, buf2580, buf2584, buf2588, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2565 = torch.ops.aten.max_pool3d_with_indices.default(buf2564, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2564
buf2566 = buf2565[0]
del buf2565
buf2569 = torch.ops.aten.max_pool3d_with_indices.default(buf2568, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2568
buf2570 = buf2569[0]
del buf2569
buf2573 = torch.ops.aten.max_pool3d_with_indices.default(buf2572, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2572
buf2574 = buf2573[0]
del buf2573
buf2576 = buf2566
del buf2566
triton_poi_fused_minimum_neg_3[grid(256)](buf2576, buf2574, buf2570,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2577 = torch.ops.aten.max_pool3d_with_indices.default(buf2576, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2578 = buf2577[0]
del buf2577
buf2581 = torch.ops.aten.max_pool3d_with_indices.default(buf2580, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2582 = buf2581[0]
del buf2581
buf2585 = torch.ops.aten.max_pool3d_with_indices.default(buf2584, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2586 = buf2585[0]
del buf2585
buf2589 = torch.ops.aten.max_pool3d_with_indices.default(buf2588, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2590 = buf2589[0]
del buf2589
buf2592 = buf2588
del buf2588
buf2596 = buf2584
del buf2584
buf2600 = buf2580
del buf2580
buf2608 = buf2576
del buf2576
buf2612 = buf2574
del buf2574
buf2616 = buf2570
del buf2570
triton_poi_fused_minimum_neg_2[grid(256)](buf2582, buf2590, buf2586,
buf2592, buf2596, buf2600, buf2608, buf2612, buf2616, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2593 = torch.ops.aten.max_pool3d_with_indices.default(buf2592, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2592
buf2594 = buf2593[0]
del buf2593
buf2597 = torch.ops.aten.max_pool3d_with_indices.default(buf2596, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2596
buf2598 = buf2597[0]
del buf2597
buf2601 = torch.ops.aten.max_pool3d_with_indices.default(buf2600, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2600
buf2602 = buf2601[0]
del buf2601
buf2604 = buf2594
del buf2594
triton_poi_fused_minimum_neg_3[grid(256)](buf2604, buf2602, buf2598,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2605 = torch.ops.aten.max_pool3d_with_indices.default(buf2604, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2606 = buf2605[0]
del buf2605
buf2609 = torch.ops.aten.max_pool3d_with_indices.default(buf2608, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2610 = buf2609[0]
del buf2609
buf2613 = torch.ops.aten.max_pool3d_with_indices.default(buf2612, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2614 = buf2613[0]
del buf2613
buf2617 = torch.ops.aten.max_pool3d_with_indices.default(buf2616, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2618 = buf2617[0]
del buf2617
buf2620 = buf2616
del buf2616
buf2624 = buf2612
del buf2612
buf2628 = buf2608
del buf2608
buf2636 = buf2604
del buf2604
buf2640 = buf2602
del buf2602
buf2644 = buf2598
del buf2598
triton_poi_fused_minimum_neg_2[grid(256)](buf2610, buf2618, buf2614,
buf2620, buf2624, buf2628, buf2636, buf2640, buf2644, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2621 = torch.ops.aten.max_pool3d_with_indices.default(buf2620, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2620
buf2622 = buf2621[0]
del buf2621
buf2625 = torch.ops.aten.max_pool3d_with_indices.default(buf2624, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2624
buf2626 = buf2625[0]
del buf2625
buf2629 = torch.ops.aten.max_pool3d_with_indices.default(buf2628, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2628
buf2630 = buf2629[0]
del buf2629
buf2632 = buf2622
del buf2622
triton_poi_fused_minimum_neg_3[grid(256)](buf2632, buf2630, buf2626,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2633 = torch.ops.aten.max_pool3d_with_indices.default(buf2632, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2634 = buf2633[0]
del buf2633
buf2637 = torch.ops.aten.max_pool3d_with_indices.default(buf2636, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2638 = buf2637[0]
del buf2637
buf2641 = torch.ops.aten.max_pool3d_with_indices.default(buf2640, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2642 = buf2641[0]
del buf2641
buf2645 = torch.ops.aten.max_pool3d_with_indices.default(buf2644, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2646 = buf2645[0]
del buf2645
buf2648 = buf2644
del buf2644
buf2652 = buf2640
del buf2640
buf2656 = buf2636
del buf2636
buf2664 = buf2632
del buf2632
buf2668 = buf2630
del buf2630
buf2672 = buf2626
del buf2626
triton_poi_fused_minimum_neg_2[grid(256)](buf2638, buf2646, buf2642,
buf2648, buf2652, buf2656, buf2664, buf2668, buf2672, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2649 = torch.ops.aten.max_pool3d_with_indices.default(buf2648, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2648
buf2650 = buf2649[0]
del buf2649
buf2653 = torch.ops.aten.max_pool3d_with_indices.default(buf2652, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2652
buf2654 = buf2653[0]
del buf2653
buf2657 = torch.ops.aten.max_pool3d_with_indices.default(buf2656, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2656
buf2658 = buf2657[0]
del buf2657
buf2660 = buf2650
del buf2650
triton_poi_fused_minimum_neg_3[grid(256)](buf2660, buf2658, buf2654,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2661 = torch.ops.aten.max_pool3d_with_indices.default(buf2660, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2662 = buf2661[0]
del buf2661
buf2665 = torch.ops.aten.max_pool3d_with_indices.default(buf2664, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2666 = buf2665[0]
del buf2665
buf2669 = torch.ops.aten.max_pool3d_with_indices.default(buf2668, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2670 = buf2669[0]
del buf2669
buf2673 = torch.ops.aten.max_pool3d_with_indices.default(buf2672, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2674 = buf2673[0]
del buf2673
buf2676 = buf2672
del buf2672
buf2680 = buf2668
del buf2668
buf2684 = buf2664
del buf2664
buf2692 = buf2660
del buf2660
buf2696 = buf2658
del buf2658
buf2700 = buf2654
del buf2654
triton_poi_fused_minimum_neg_2[grid(256)](buf2666, buf2674, buf2670,
buf2676, buf2680, buf2684, buf2692, buf2696, buf2700, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2677 = torch.ops.aten.max_pool3d_with_indices.default(buf2676, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2676
buf2678 = buf2677[0]
del buf2677
buf2681 = torch.ops.aten.max_pool3d_with_indices.default(buf2680, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2680
buf2682 = buf2681[0]
del buf2681
buf2685 = torch.ops.aten.max_pool3d_with_indices.default(buf2684, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2684
buf2686 = buf2685[0]
del buf2685
buf2688 = buf2678
del buf2678
triton_poi_fused_minimum_neg_3[grid(256)](buf2688, buf2686, buf2682,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2689 = torch.ops.aten.max_pool3d_with_indices.default(buf2688, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2690 = buf2689[0]
del buf2689
buf2693 = torch.ops.aten.max_pool3d_with_indices.default(buf2692, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2694 = buf2693[0]
del buf2693
buf2697 = torch.ops.aten.max_pool3d_with_indices.default(buf2696, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2698 = buf2697[0]
del buf2697
buf2701 = torch.ops.aten.max_pool3d_with_indices.default(buf2700, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2702 = buf2701[0]
del buf2701
buf2704 = buf2700
del buf2700
buf2708 = buf2696
del buf2696
buf2712 = buf2692
del buf2692
buf2720 = buf2688
del buf2688
buf2724 = buf2686
del buf2686
buf2728 = buf2682
del buf2682
triton_poi_fused_minimum_neg_2[grid(256)](buf2694, buf2702, buf2698,
buf2704, buf2708, buf2712, buf2720, buf2724, buf2728, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2705 = torch.ops.aten.max_pool3d_with_indices.default(buf2704, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2704
buf2706 = buf2705[0]
del buf2705
buf2709 = torch.ops.aten.max_pool3d_with_indices.default(buf2708, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2708
buf2710 = buf2709[0]
del buf2709
buf2713 = torch.ops.aten.max_pool3d_with_indices.default(buf2712, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2712
buf2714 = buf2713[0]
del buf2713
buf2716 = buf2706
del buf2706
triton_poi_fused_minimum_neg_3[grid(256)](buf2716, buf2714, buf2710,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2717 = torch.ops.aten.max_pool3d_with_indices.default(buf2716, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2718 = buf2717[0]
del buf2717
buf2721 = torch.ops.aten.max_pool3d_with_indices.default(buf2720, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2722 = buf2721[0]
del buf2721
buf2725 = torch.ops.aten.max_pool3d_with_indices.default(buf2724, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2726 = buf2725[0]
del buf2725
buf2729 = torch.ops.aten.max_pool3d_with_indices.default(buf2728, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2730 = buf2729[0]
del buf2729
buf2732 = buf2728
del buf2728
buf2736 = buf2724
del buf2724
buf2740 = buf2720
del buf2720
buf2748 = buf2716
del buf2716
buf2752 = buf2714
del buf2714
buf2756 = buf2710
del buf2710
triton_poi_fused_minimum_neg_2[grid(256)](buf2722, buf2730, buf2726,
buf2732, buf2736, buf2740, buf2748, buf2752, buf2756, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2733 = torch.ops.aten.max_pool3d_with_indices.default(buf2732, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2732
buf2734 = buf2733[0]
del buf2733
buf2737 = torch.ops.aten.max_pool3d_with_indices.default(buf2736, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2736
buf2738 = buf2737[0]
del buf2737
buf2741 = torch.ops.aten.max_pool3d_with_indices.default(buf2740, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2740
buf2742 = buf2741[0]
del buf2741
buf2744 = buf2734
del buf2734
triton_poi_fused_minimum_neg_3[grid(256)](buf2744, buf2742, buf2738,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2745 = torch.ops.aten.max_pool3d_with_indices.default(buf2744, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2746 = buf2745[0]
del buf2745
buf2749 = torch.ops.aten.max_pool3d_with_indices.default(buf2748, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2750 = buf2749[0]
del buf2749
buf2753 = torch.ops.aten.max_pool3d_with_indices.default(buf2752, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2754 = buf2753[0]
del buf2753
buf2757 = torch.ops.aten.max_pool3d_with_indices.default(buf2756, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2758 = buf2757[0]
del buf2757
buf2760 = buf2756
del buf2756
buf2764 = buf2752
del buf2752
buf2768 = buf2748
del buf2748
buf2776 = buf2744
del buf2744
buf2780 = buf2742
del buf2742
buf2784 = buf2738
del buf2738
triton_poi_fused_minimum_neg_2[grid(256)](buf2750, buf2758, buf2754,
buf2760, buf2764, buf2768, buf2776, buf2780, buf2784, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2761 = torch.ops.aten.max_pool3d_with_indices.default(buf2760, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2760
buf2762 = buf2761[0]
del buf2761
buf2765 = torch.ops.aten.max_pool3d_with_indices.default(buf2764, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2764
buf2766 = buf2765[0]
del buf2765
buf2769 = torch.ops.aten.max_pool3d_with_indices.default(buf2768, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2768
buf2770 = buf2769[0]
del buf2769
buf2772 = buf2762
del buf2762
triton_poi_fused_minimum_neg_3[grid(256)](buf2772, buf2770, buf2766,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2773 = torch.ops.aten.max_pool3d_with_indices.default(buf2772, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2774 = buf2773[0]
del buf2773
buf2777 = torch.ops.aten.max_pool3d_with_indices.default(buf2776, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2778 = buf2777[0]
del buf2777
buf2781 = torch.ops.aten.max_pool3d_with_indices.default(buf2780, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2782 = buf2781[0]
del buf2781
buf2785 = torch.ops.aten.max_pool3d_with_indices.default(buf2784, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2786 = buf2785[0]
del buf2785
buf2788 = buf2784
del buf2784
buf2792 = buf2780
del buf2780
buf2796 = buf2776
del buf2776
buf2804 = buf2772
del buf2772
buf2808 = buf2770
del buf2770
buf2812 = buf2766
del buf2766
triton_poi_fused_minimum_neg_2[grid(256)](buf2778, buf2786, buf2782,
buf2788, buf2792, buf2796, buf2804, buf2808, buf2812, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2789 = torch.ops.aten.max_pool3d_with_indices.default(buf2788, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2788
buf2790 = buf2789[0]
del buf2789
buf2793 = torch.ops.aten.max_pool3d_with_indices.default(buf2792, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2792
buf2794 = buf2793[0]
del buf2793
buf2797 = torch.ops.aten.max_pool3d_with_indices.default(buf2796, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2796
buf2798 = buf2797[0]
del buf2797
buf2800 = buf2790
del buf2790
triton_poi_fused_minimum_neg_3[grid(256)](buf2800, buf2798, buf2794,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2794
del buf2798
buf2801 = torch.ops.aten.max_pool3d_with_indices.default(buf2800, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf2800
buf2802 = buf2801[0]
del buf2801
buf2805 = torch.ops.aten.max_pool3d_with_indices.default(buf2804, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2806 = buf2805[0]
del buf2805
buf2809 = torch.ops.aten.max_pool3d_with_indices.default(buf2808, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2810 = buf2809[0]
del buf2809
buf2813 = torch.ops.aten.max_pool3d_with_indices.default(buf2812, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2814 = buf2813[0]
del buf2813
buf2816 = buf2812
del buf2812
buf2820 = buf2808
del buf2808
buf2824 = buf2804
del buf2804
triton_poi_fused_minimum_neg_4[grid(256)](buf2806, buf2814, buf2810,
buf2816, buf2820, buf2824, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf2817 = torch.ops.aten.max_pool3d_with_indices.default(buf2816, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2816
buf2818 = buf2817[0]
del buf2817
buf2821 = torch.ops.aten.max_pool3d_with_indices.default(buf2820, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2820
buf2822 = buf2821[0]
del buf2821
buf2825 = torch.ops.aten.max_pool3d_with_indices.default(buf2824, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2824
buf2826 = buf2825[0]
del buf2825
buf2828 = buf2818
del buf2818
triton_poi_fused_minimum_neg_3[grid(256)](buf2828, buf2826, buf2822,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2822
del buf2826
buf2829 = torch.ops.aten.max_pool3d_with_indices.default(buf2828, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf2828
buf2830 = buf2829[0]
del buf2829
buf2832 = buf1430
del buf1430
buf2833 = buf1462
del buf1462
buf2834 = buf1490
del buf1490
buf2835 = buf1518
del buf1518
buf2836 = buf1546
del buf1546
buf2837 = buf1574
del buf1574
buf2838 = buf1602
del buf1602
buf2839 = buf1630
del buf1630
buf2840 = buf1658
del buf1658
buf2841 = buf1686
del buf1686
buf2842 = buf1714
del buf1714
buf2843 = buf1742
del buf1742
buf2844 = buf1770
del buf1770
buf2845 = buf1798
del buf1798
buf2846 = buf1826
del buf1826
buf2847 = buf1854
del buf1854
buf2848 = buf1882
del buf1882
buf2849 = buf1910
del buf1910
buf2850 = buf1938
del buf1938
buf2851 = buf1966
del buf1966
buf2852 = buf1994
del buf1994
buf2853 = buf2022
del buf2022
buf2854 = buf2050
del buf2050
buf2855 = buf2078
del buf2078
buf2856 = buf2106
del buf2106
buf2857 = buf2134
del buf2134
buf2858 = buf2162
del buf2162
buf2859 = buf2190
del buf2190
buf2860 = buf2218
del buf2218
buf2861 = buf2246
del buf2246
buf2862 = buf2274
del buf2274
buf2863 = buf2302
del buf2302
buf2864 = buf2330
del buf2330
buf2865 = buf2358
del buf2358
buf2866 = buf2386
del buf2386
buf2867 = buf2414
del buf2414
buf2868 = buf2442
del buf2442
buf2869 = buf2470
del buf2470
buf2870 = buf2498
del buf2498
buf2871 = buf2526
del buf2526
buf2872 = buf2554
del buf2554
buf2873 = buf2582
del buf2582
buf2874 = buf2610
del buf2610
buf2875 = buf2638
del buf2638
buf2876 = buf2666
del buf2666
buf2877 = buf2694
del buf2694
buf2878 = buf2722
del buf2722
buf2879 = buf2750
del buf2750
buf2880 = buf2778
del buf2778
buf2881 = buf2806
del buf2806
triton_poi_fused_add_minimum_mul_neg_relu_sub_5[grid(256)](buf2832,
buf2833, buf2834, buf2835, buf2836, buf2837, buf2838, buf2839,
buf2840, buf2841, buf2842, buf2843, buf2844, buf2845, buf2846,
buf2847, buf2848, buf2849, buf2850, buf2851, buf2852, buf2853,
buf2854, buf2855, buf2856, buf2857, buf2858, buf2859, buf2860,
buf2861, buf2862, buf2863, buf2864, buf2865, buf2866, buf2867,
buf2868, buf2869, buf2870, buf2871, buf2872, buf2873, buf2874,
buf2875, buf2876, buf2877, buf2878, buf2879, buf2880, buf2881,
arg1_1, buf1434, buf1442, buf1438, buf1458, buf1470, buf1466,
buf1486, buf1498, buf1494, buf1514, buf1526, buf1522, buf1542,
buf1554, buf1550, buf1570, buf1582, buf1578, buf1598, buf1610,
buf1606, buf1626, buf1638, buf1634, buf1654, buf1666, buf1662,
buf1682, buf1694, buf1690, buf1710, buf1722, buf1718, buf1738,
buf1750, buf1746, buf1766, buf1778, buf1774, buf1794, buf1806,
buf1802, buf1822, buf1834, buf1830, buf1850, buf1862, buf1858,
buf1878, buf1890, buf1886, buf1906, buf1918, buf1914, buf1934,
buf1946, buf1942, buf1962, buf1974, buf1970, buf1990, buf2002,
buf1998, buf2018, buf2030, buf2026, buf2046, buf2058, buf2054,
buf2074, buf2086, buf2082, buf2102, buf2114, buf2110, buf2130,
buf2142, buf2138, buf2158, buf2170, buf2166, buf2186, buf2198,
buf2194, buf2214, buf2226, buf2222, buf2242, buf2254, buf2250,
buf2270, buf2282, buf2278, buf2298, buf2310, buf2306, buf2326,
buf2338, buf2334, buf2354, buf2366, buf2362, buf2382, buf2394,
buf2390, buf2410, buf2422, buf2418, buf2438, buf2450, buf2446,
buf2466, buf2478, buf2474, buf2494, buf2506, buf2502, buf2522,
buf2534, buf2530, buf2550, buf2562, buf2558, buf2578, buf2590,
buf2586, buf2606, buf2618, buf2614, buf2634, buf2646, buf2642,
buf2662, buf2674, buf2670, buf2690, buf2702, buf2698, buf2718,
buf2730, buf2726, buf2746, buf2758, buf2754, buf2774, buf2786,
buf2782, buf2802, buf2814, buf2810, buf2830, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
del buf1434
del buf1438
del buf1442
del buf1458
del buf1466
del buf1470
del buf1486
del buf1494
del buf1498
del buf1514
del buf1522
del buf1526
del buf1542
del buf1550
del buf1554
del buf1570
del buf1578
del buf1582
del buf1598
del buf1606
del buf1610
del buf1626
del buf1634
del buf1638
del buf1654
del buf1662
del buf1666
del buf1682
del buf1690
del buf1694
del buf1710
del buf1718
del buf1722
del buf1738
del buf1746
del buf1750
del buf1766
del buf1774
del buf1778
del buf1794
del buf1802
del buf1806
del buf1822
del buf1830
del buf1834
del buf1850
del buf1858
del buf1862
del buf1878
del buf1886
del buf1890
del buf1906
del buf1914
del buf1918
del buf1934
del buf1942
del buf1946
del buf1962
del buf1970
del buf1974
del buf1990
del buf1998
del buf2002
del buf2018
del buf2026
del buf2030
del buf2046
del buf2054
del buf2058
del buf2074
del buf2082
del buf2086
del buf2102
del buf2110
del buf2114
del buf2130
del buf2138
del buf2142
del buf2158
del buf2166
del buf2170
del buf2186
del buf2194
del buf2198
del buf2214
del buf2222
del buf2226
del buf2242
del buf2250
del buf2254
del buf2270
del buf2278
del buf2282
del buf2298
del buf2306
del buf2310
del buf2326
del buf2334
del buf2338
del buf2354
del buf2362
del buf2366
del buf2382
del buf2390
del buf2394
del buf2410
del buf2418
del buf2422
del buf2438
del buf2446
del buf2450
del buf2466
del buf2474
del buf2478
del buf2494
del buf2502
del buf2506
del buf2522
del buf2530
del buf2534
del buf2550
del buf2558
del buf2562
del buf2578
del buf2586
del buf2590
del buf2606
del buf2614
del buf2618
del buf2634
del buf2642
del buf2646
del buf2662
del buf2670
del buf2674
del buf2690
del buf2698
del buf2702
del buf2718
del buf2726
del buf2730
del buf2746
del buf2754
del buf2758
del buf2774
del buf2782
del buf2786
del buf2802
del buf2810
del buf2814
del buf2830
del buf2832
del buf2833
del buf2834
del buf2835
del buf2836
del buf2837
del buf2838
del buf2839
del buf2840
del buf2841
del buf2842
del buf2843
del buf2844
del buf2845
del buf2846
del buf2847
del buf2848
del buf2849
del buf2850
del buf2851
del buf2852
del buf2853
del buf2854
del buf2855
del buf2856
del buf2857
del buf2858
del buf2859
del buf2860
del buf2861
del buf2862
del buf2863
del buf2864
del buf2865
del buf2866
del buf2867
del buf2868
del buf2869
del buf2870
del buf2871
del buf2872
del buf2873
del buf2874
del buf2875
del buf2876
del buf2877
del buf2878
del buf2879
del buf2880
buf2882 = buf14
del buf14
buf2883 = buf2882
del buf2882
buf2884 = buf2883
del buf2883
buf2885 = buf102
del buf102
buf2886 = buf130
del buf130
buf2887 = buf158
del buf158
buf2888 = buf186
del buf186
buf2889 = buf214
del buf214
buf2890 = buf242
del buf242
buf2891 = buf270
del buf270
buf2892 = buf2891
del buf2891
buf2893 = buf2892
del buf2892
buf2894 = buf2893
del buf2893
buf2895 = buf2894
del buf2894
buf2896 = buf2895
del buf2895
buf2897 = buf2896
del buf2896
buf2898 = buf2897
del buf2897
buf2899 = buf2898
del buf2898
buf2900 = buf2899
del buf2899
buf2901 = buf2900
del buf2900
buf2902 = buf2901
del buf2901
buf2903 = buf2902
del buf2902
buf2904 = buf2903
del buf2903
buf2905 = buf2904
del buf2904
buf2906 = buf2905
del buf2905
buf2907 = buf2906
del buf2906
buf2908 = buf2907
del buf2907
buf2909 = buf2908
del buf2908
buf2910 = buf2909
del buf2909
buf2911 = buf2910
del buf2910
buf2912 = buf2911
del buf2911
buf2913 = buf2912
del buf2912
buf2914 = buf2913
del buf2913
buf2915 = buf2914
del buf2914
buf2916 = buf2915
del buf2915
buf2917 = buf1002
del buf1002
buf2918 = buf1026
del buf1026
buf2919 = buf1054
del buf1054
buf2920 = buf1082
del buf1082
buf2921 = buf1110
del buf1110
buf2922 = buf1138
del buf1138
buf2923 = buf1166
del buf1166
buf2924 = buf1194
del buf1194
buf2925 = buf1222
del buf1222
buf2926 = buf1250
del buf1250
buf2927 = buf1278
del buf1278
buf2928 = buf1306
del buf1306
buf2929 = buf1334
del buf1334
buf2930 = buf1362
del buf1362
buf2931 = buf1390
del buf1390
triton_poi_fused_add_minimum_mul_neg_relu_sub_6[grid(256)](buf2884,
buf2885, buf2886, buf2887, buf2888, buf2889, buf2890, buf2916,
buf2917, buf2918, buf2919, buf2920, buf2921, buf2922, buf2923,
buf2924, buf2925, buf2926, buf2927, buf2928, buf2929, buf2930,
buf2931, arg0_1, buf18, buf26, buf22, buf42, buf46, buf54,
buf50, buf70, buf74, buf82, buf78, buf98, buf110, buf106,
buf126, buf138, buf134, buf154, buf166, buf162, buf182, buf194,
buf190, buf210, buf222, buf218, buf238, buf250, buf246, buf266,
buf278, buf274, buf294, buf298, buf306, buf302, buf322, buf326,
buf334, buf330, buf350, buf354, buf362, buf358, buf378, buf382,
buf390, buf386, buf406, buf410, buf418, buf414, buf434, buf438,
buf446, buf442, buf462, buf466, buf474, buf470, buf490, buf494,
buf502, buf498, buf518, buf522, buf530, buf526, buf546, buf550,
buf558, buf554, buf574, buf578, buf586, buf582, buf602, buf606,
buf614, buf610, buf630, buf634, buf642, buf638, buf658, buf662,
buf670, buf666, buf686, buf690, buf698, buf694, buf714, buf718,
buf726, buf722, buf742, buf746, buf754, buf750, buf770, buf774,
buf782, buf778, buf798, buf802, buf810, buf806, buf826, buf830,
buf838, buf834, buf854, buf858, buf866, buf862, buf882, buf886,
buf894, buf890, buf910, buf914, buf922, buf918, buf938, buf942,
buf950, buf946, buf966, buf970, buf978, buf974, buf994, buf998,
buf1006, buf1022, buf1034, buf1030, buf1050, buf1062, buf1058,
buf1078, buf1090, buf1086, buf1106, buf1118, buf1114, buf1134,
buf1146, buf1142, buf1162, buf1174, buf1170, buf1190, buf1202,
buf1198, buf1218, buf1230, buf1226, buf1246, buf1258, buf1254,
buf1274, buf1286, buf1282, buf1302, buf1314, buf1310, buf1330,
buf1342, buf1338, buf1358, buf1370, buf1366, buf1386, buf1398,
buf1394, buf1414, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf1006
del buf1022
del buf1030
del buf1034
del buf1050
del buf1058
del buf106
del buf1062
del buf1078
del buf1086
del buf1090
del buf110
del buf1106
del buf1114
del buf1118
del buf1134
del buf1142
del buf1146
del buf1162
del buf1170
del buf1174
del buf1190
del buf1198
del buf1202
del buf1218
del buf1226
del buf1230
del buf1246
del buf1254
del buf1258
del buf126
del buf1274
del buf1282
del buf1286
del buf1302
del buf1310
del buf1314
del buf1330
del buf1338
del buf134
del buf1342
del buf1358
del buf1366
del buf1370
del buf138
del buf1386
del buf1394
del buf1398
del buf1414
del buf154
del buf162
del buf166
del buf18
del buf182
del buf190
del buf194
del buf210
del buf218
del buf22
del buf222
del buf238
del buf246
del buf250
del buf26
del buf266
del buf274
del buf278
del buf2884
del buf2885
del buf2886
del buf2887
del buf2888
del buf2889
del buf2890
del buf2916
del buf2917
del buf2918
del buf2919
del buf2920
del buf2921
del buf2922
del buf2923
del buf2924
del buf2925
del buf2926
del buf2927
del buf2928
del buf2929
del buf2930
del buf294
del buf298
del buf302
del buf306
del buf322
del buf326
del buf330
del buf334
del buf350
del buf354
del buf358
del buf362
del buf378
del buf382
del buf386
del buf390
del buf406
del buf410
del buf414
del buf418
del buf42
del buf434
del buf438
del buf442
del buf446
del buf46
del buf462
del buf466
del buf470
del buf474
del buf490
del buf494
del buf498
del buf50
del buf502
del buf518
del buf522
del buf526
del buf530
del buf54
del buf546
del buf550
del buf554
del buf558
del buf574
del buf578
del buf582
del buf586
del buf602
del buf606
del buf610
del buf614
del buf630
del buf634
del buf638
del buf642
del buf658
del buf662
del buf666
del buf670
del buf686
del buf690
del buf694
del buf698
del buf70
del buf714
del buf718
del buf722
del buf726
del buf74
del buf742
del buf746
del buf750
del buf754
del buf770
del buf774
del buf778
del buf78
del buf782
del buf798
del buf802
del buf806
del buf810
del buf82
del buf826
del buf830
del buf834
del buf838
del buf854
del buf858
del buf862
del buf866
del buf882
del buf886
del buf890
del buf894
del buf910
del buf914
del buf918
del buf922
del buf938
del buf942
del buf946
del buf950
del buf966
del buf970
del buf974
del buf978
del buf98
del buf994
del buf998
return buf2881, buf2931
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z)))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z)))
:param net_output:
:param gt:
:param axes: can be (, ) = no summation
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == 'cuda':
y_onehot = y_onehot
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
tn = (1 - net_output) * (1 - y_onehot)
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp,
dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp,
dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn,
dim=1)), dim=1)
tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn,
dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tn = tn ** 2
if len(axes) > 0:
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
tn = sum_tensor(tn, axes, keepdim=False)
return tp, fp, fn, tn
def soft_erode(I):
p1 = -F.max_pool3d(-I, (3, 1, 1), (1, 1, 1), (1, 0, 0))
p2 = -F.max_pool3d(-I, (1, 3, 1), (1, 1, 1), (0, 1, 0))
p3 = -F.max_pool3d(-I, (1, 1, 3), (1, 1, 1), (0, 0, 1))
return torch.min(torch.min(p1, p3), p2)
def soft_dilate(I):
return F.max_pool3d(I, (3, 3, 3), (1, 1, 1), (1, 1, 1))
def soft_open(I):
return soft_dilate(soft_erode(I))
def soft_skel(img, k=50):
img1 = soft_open(img)
skel = F.relu(img - img1)
for iter in range(k):
img = soft_erode(img)
img1 = soft_open(img)
delta = F.relu(img - img1)
skel = skel + F.relu(delta - skel * delta)
if torch.cuda.is_available():
del img1
del delta
return skel
class SoftClDiceLossNew(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, k=2):
"""
"""
super(SoftClDiceLossNew, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.k = k
def softCenterline(self, I):
max = nn.MaxPool3d(3, stride=1, padding=1)
relu = nn.ReLU()
Ip = max(-max(-I))
cl = relu(I - Ip)
for iter in range(self.k):
I = -max(-I)
Ip = max(-max(-I))
cl = cl + cl * relu(I - Ip)
return cl
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
CamilaGL/nnUNet
|
SoftClDiceLoss
| false
| 1,175
|
[
"Apache-2.0"
] | 0
|
471ab73a6e4f67fc72d476183b5344be4cccf7ca
|
https://github.com/CamilaGL/nnUNet/tree/471ab73a6e4f67fc72d476183b5344be4cccf7ca
|
ConvLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvLayer(nn.Module):
"""A Convolutional Layer"""
def __init__(self, in_channels=1, out_channels=256, kernel_size=9, stride=1
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
return F.relu(self.conv(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
xnumel = 3136
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_out_ptr0 + (x2 + 3136 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 3136 * y3), tmp4, xmask)
tl.store(out_ptr0 + (y0 + 256 * x2 + 802816 * y1), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 1, 9, 9), (81, 81, 9, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 56, 56), (802816, 3136, 56, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 256, 56, 56), (802816, 1, 14336, 256),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(1024, 3136)
](buf1, primals_2, buf2, 1024, 3136, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class ConvLayerNew(nn.Module):
"""A Convolutional Layer"""
def __init__(self, in_channels=1, out_channels=256, kernel_size=9, stride=1
):
super(ConvLayerNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
VIVelev/capsnets
|
ConvLayer
| false
| 1,176
|
[
"MIT"
] | 0
|
dca4bfcd4007977a6bc3534a4676880326fcf94a
|
https://github.com/VIVelev/capsnets/tree/dca4bfcd4007977a6bc3534a4676880326fcf94a
|
MultiHeadAttention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadAttention(nn.Module):
"""Multi-headed Attention for input Query, Key, Value
Multi-headed Attention is a module for attention mechanisms which runs through attention in several times in
parallel, then the multiple outputs are concatenated and linearly transformed
Args:
embed_size (int): Max embedding size
num_heads (int): Number of heads in multi-headed attention; Number of splits in the embedding size
dropout (float, optional): Percentage of Dropout to be applied in range 0 <= dropout <=1
batch_dim (int, optional): The dimension in which batch dimensions is
"""
def __init__(self, embed_size: 'int', num_heads: 'int', dropout:
'float'=0.2, batch_dim: 'int'=0):
super(MultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.num_heads = num_heads
self.dropout = dropout
self.batch_dim = batch_dim
self.dropout_layer = nn.Dropout(dropout)
self.head_size = self.embed_size // self.num_heads
assert self.head_size * self.num_heads == self.embed_size, 'Heads cannot split Embedding size equally'
self.Q = nn.Linear(self.embed_size, self.embed_size)
self.K = nn.Linear(self.embed_size, self.embed_size)
self.V = nn.Linear(self.embed_size, self.embed_size)
self.linear = nn.Linear(self.embed_size, self.embed_size)
def forward(self, q, k, v, mask=None):
q_batch_size, q_seq_len, _q_embed_size = q.size()
k_batch_size, k_seq_len, _k_embed_size = k.size()
v_batch_size, v_seq_len, _v_embed_size = v.size()
q = self.Q(q).reshape(q_batch_size, q_seq_len, self.num_heads, self
.head_size)
k = self.K(k).reshape(k_batch_size, k_seq_len, self.num_heads, self
.head_size)
v = self.V(v).reshape(v_batch_size, v_seq_len, self.num_heads, self
.head_size)
attention = self.attention(q, k, v, mask=mask)
concatenated = attention.reshape(v_batch_size, -1, self.embed_size)
out = self.linear(concatenated)
return out
def attention(self, q, k, v, mask=None):
scores = torch.einsum('bqhe,bkhe->bhqk', [q, k])
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores /= math.sqrt(self.embed_size)
scores = F.softmax(scores, dim=-1)
scores = self.dropout_layer(scores)
attention = torch.einsum('bhql,blhd->bqhd', [scores, v])
return attention
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'embed_size': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp3
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = tmp10 * tmp3
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp0 * tmp13
tmp15 = tmp14 * tmp3
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tl.store(out_ptr0 + x3, tmp16, xmask)
tl.store(out_ptr1 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_4
del primals_5
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_6
del primals_7
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, buf3, buf4, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf4
triton_poi_fused_clone_2[grid(16, 4)](buf2, primals_9, buf6, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0)
del buf9
triton_poi_fused_add_4[grid(64)](buf10, primals_11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_11
return buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), buf1, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0)
class MultiHeadAttentionNew(nn.Module):
"""Multi-headed Attention for input Query, Key, Value
Multi-headed Attention is a module for attention mechanisms which runs through attention in several times in
parallel, then the multiple outputs are concatenated and linearly transformed
Args:
embed_size (int): Max embedding size
num_heads (int): Number of heads in multi-headed attention; Number of splits in the embedding size
dropout (float, optional): Percentage of Dropout to be applied in range 0 <= dropout <=1
batch_dim (int, optional): The dimension in which batch dimensions is
"""
def __init__(self, embed_size: 'int', num_heads: 'int', dropout:
'float'=0.2, batch_dim: 'int'=0):
super(MultiHeadAttentionNew, self).__init__()
self.embed_size = embed_size
self.num_heads = num_heads
self.dropout = dropout
self.batch_dim = batch_dim
self.dropout_layer = nn.Dropout(dropout)
self.head_size = self.embed_size // self.num_heads
assert self.head_size * self.num_heads == self.embed_size, 'Heads cannot split Embedding size equally'
self.Q = nn.Linear(self.embed_size, self.embed_size)
self.K = nn.Linear(self.embed_size, self.embed_size)
self.V = nn.Linear(self.embed_size, self.embed_size)
self.linear = nn.Linear(self.embed_size, self.embed_size)
def attention(self, q, k, v, mask=None):
scores = torch.einsum('bqhe,bkhe->bhqk', [q, k])
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores /= math.sqrt(self.embed_size)
scores = F.softmax(scores, dim=-1)
scores = self.dropout_layer(scores)
attention = torch.einsum('bhql,blhd->bqhd', [scores, v])
return attention
def forward(self, input_0, input_1, input_2):
primals_4 = self.Q.weight
primals_5 = self.Q.bias
primals_6 = self.K.weight
primals_7 = self.K.bias
primals_8 = self.V.weight
primals_9 = self.V.bias
primals_10 = self.linear.weight
primals_11 = self.linear.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
UdbhavPrasad072300/Kaggle-Competition-Templates
|
MultiHeadAttention
| false
| 1,177
|
[
"MIT"
] | 0
|
f3c93ff60ae33af9b6c6d79d30c5099eb250396c
|
https://github.com/UdbhavPrasad072300/Kaggle-Competition-Templates/tree/f3c93ff60ae33af9b6c6d79d30c5099eb250396c
|
WeighedL1Loss
|
import torch
from torch import Tensor
from torch.nn import L1Loss
class WeighedL1Loss(L1Loss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor:
loss = super().forward(input, target)
return (loss * self.weights).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weights': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import L1Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class WeighedL1LossNew(L1Loss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
UT-ADL/lidar-as-camera
|
WeighedL1Loss
| false
| 1,178
|
[
"Apache-2.0"
] | 0
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
https://github.com/UT-ADL/lidar-as-camera/tree/daccb2ae21b4899ecfd8611b7a27f91681617383
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.