| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|
ShakeResNeXt
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
if self.training:
h = ShakeShake.apply(h1, h2, self.training)
else:
h = 0.5 * (h1 + h2)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
        return nn.Sequential(
            nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(out_ch))
class ShakeResNeXt(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXt, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'cardinary': 4, 'label': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 1024), (1024, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (16, 1024),
(1024, 1), 0), reinterpret_tensor(primals_4, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (16,
1024), (1024, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
if self.training:
h = ShakeShake.apply(h1, h2, self.training)
else:
h = 0.5 * (h1 + h2)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
        return nn.Sequential(
            nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(out_ch))
class ShakeResNeXtNew(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXtNew, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc_out.weight
primals_5 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
DensoITLab/TeachAugment
|
ShakeResNeXt
| false
| 7,984
|
["BSD-2-Clause"] | 20
|
66ec099a0afab99e18531c5437182cfe17dc30c8
|
https://github.com/DensoITLab/TeachAugment/tree/66ec099a0afab99e18531c5437182cfe17dc30c8
|
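A quick shape sanity-check for the eager `ShakeResNeXt` above; the hyperparameters here are illustrative, not from this row: depth=29 gives three units per stage, and a 32x32 input is assumed so that `avg_pool2d(h, 8)` reduces the final 8x8 map to 1x1, matching `view(-1, n_chs[3])`.

```python
import torch

net = ShakeResNeXt(depth=29, w_base=64, cardinary=4, label=10)
x = torch.rand(2, 3, 32, 32)   # CIFAR-sized; two stride-2 stages: 32 -> 16 -> 8
logits = net(x)
assert logits.shape == (2, 10)
```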
FiLM
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class FiLM(nn.Module):
"""
A Feature-wise Linear Modulation Layer from
'FiLM: Visual Reasoning with a General Conditioning Layer'
"""
def forward(self, x, gammas, betas):
return gammas * x + betas
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](arg0_1, arg1_1, arg2_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class FiLMNew(nn.Module):
"""
A Feature-wise Linear Modulation Layer from
'FiLM: Visual Reasoning with a General Conditioning Layer'
"""
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Dou-Yiming/YouRefIt_ERU
|
FiLM
| false
| 7,985
|
["MIT"] | 13
|
2a8e849380ed2d253c467b1af744a514bc171372
|
https://github.com/Dou-Yiming/YouRefIt_ERU/tree/2a8e849380ed2d253c467b1af744a514bc171372
|
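A minimal sketch of the `FiLM` layer above; the per-channel `[N, C, 1, 1]` gamma/beta shapes are an assumption for illustration (the module only requires broadcastable shapes).

```python
import torch

film = FiLM()
x = torch.randn(2, 8, 16, 16)
gammas = torch.randn(2, 8, 1, 1)  # one scale per channel
betas = torch.randn(2, 8, 1, 1)   # one shift per channel
y = film(x, gammas, betas)        # gammas * x + betas
assert y.shape == x.shape
```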
TransformerLayer
|
import torch
import torch.nn as nn
import torch.onnx
class TransformerLayer(nn.Module):
def __init__(self, channels, num_heads):
super().__init__()
self.q = nn.Linear(channels, channels, bias=False)
self.k = nn.Linear(channels, channels, bias=False)
self.v = nn.Linear(channels, channels, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=channels, num_heads=num_heads)
self.fc1 = nn.Linear(channels, channels, bias=False)
self.fc2 = nn.Linear(channels, channels, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
                             YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_4, (4, 4),
(1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_6, (4,), (1,), 4),
            buf1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 16),
            alpha=1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_6, (4,), (1,), 8),
            buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 32),
            alpha=1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf6, primals_6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_6
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_2[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_add_4[grid(16)](buf13, primals_8, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_9, (4, 4), (1,
4), 0), out=buf14)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf13, buf14, reinterpret_tensor(primals_10, (
4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
    return (buf15, primals_2, buf0, buf1, buf2, buf9,
        reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf13, buf14,
        primals_10, primals_9, primals_7,
        reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0),
        reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
        reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
        reinterpret_tensor(primals_5, (4, 4), (4, 1), 32),
        reinterpret_tensor(primals_5, (4, 4), (4, 1), 16),
        reinterpret_tensor(primals_5, (4, 4), (4, 1), 0))
class TransformerLayerNew(nn.Module):
def __init__(self, channels, num_heads):
super().__init__()
self.q = nn.Linear(channels, channels, bias=False)
self.k = nn.Linear(channels, channels, bias=False)
self.v = nn.Linear(channels, channels, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=channels, num_heads=num_heads)
self.fc1 = nn.Linear(channels, channels, bias=False)
self.fc2 = nn.Linear(channels, channels, bias=False)
def forward(self, input_0):
primals_1 = self.q.weight
primals_2 = self.k.weight
primals_3 = self.v.weight
primals_5 = self.ma.in_proj_weight
primals_6 = self.ma.in_proj_bias
primals_4 = self.ma.out_proj.weight
primals_8 = self.ma.out_proj.bias
primals_7 = self.fc1.weight
primals_9 = self.fc2.weight
primals_10 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
DDGRCF/YOLOX_OBB
|
TransformerLayer
| false
| 7,986
|
["Apache-2.0"] | 39
|
27b80953306492b8bc83b86b1353d8cee01ef9b6
|
https://github.com/DDGRCF/YOLOX_OBB/tree/27b80953306492b8bc83b86b1353d8cee01ef9b6
|
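A usage sketch for the eager `TransformerLayer`; the unbatched `(seq_len, channels)` input mirrors `get_inputs()` above and assumes a PyTorch version whose `nn.MultiheadAttention` accepts unbatched inputs (1.11+).

```python
import torch

layer = TransformerLayer(channels=8, num_heads=2)  # channels must divide evenly by num_heads
x = torch.rand(5, 8)                               # (seq_len, channels)
y = layer(x)                                       # attention + residual, then 2-layer MLP + residual
assert y.shape == x.shape
```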
QuadriLinearScore
|
import math
import torch
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
class QuadriLinearScore(nn.Module):
"""
Outer product version of quadrilinear function for sequence labeling.
"""
    def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396,
                 std=0.1545, window_size=1, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super(QuadriLinearScore, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.window_size = window_size
self.normalization = normalization
        self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.temb_size))
self.T = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.U = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.V = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.W = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size),
                         b=math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T, std=self.std)
nn.init.normal_(self.U, std=self.std)
nn.init.normal_(self.V, std=self.std)
nn.init.normal_(self.W, std=self.std)
def forward(self, word_emb):
"""
Args:
word_emb: [batch, sent_length, wemb_size]
Returns: Tensor
[batch, sent_length-window_size, tagset_size, tagset_size]
"""
        assert word_emb.size(2) == self.wemb_size, 'word embedding size is required to match wemb_size.'
g0 = torch.matmul(word_emb[:, :-self.window_size], self.U)
g1 = torch.matmul(word_emb[:, self.window_size:], self.T)
g2 = torch.matmul(self.tag_emd, self.V)
g3 = torch.matmul(self.tag_emd, self.W)
temp01 = g0 * g1
temp012 = torch.einsum('nak,bk->nabk', [temp01, g2])
score = torch.einsum('nabk,ck->nabc', [temp012, g3])
if self.normalization:
score = score / math.sqrt(self.rank)
return score
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'wemb_size': 4, 'tagset_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel,
                                          XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 3) + 16 * (x1 // 3)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel,
                                          XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 4 * (x1 % 3) + 16 * (x1 // 3)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 19008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x2 = xindex // 1584
x3 = xindex % 1584
    tmp0 = tl.load(in_ptr0 + (x0 + 396 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0 + 396 * x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x3 + 1600 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_bmm_transpose_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 19008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x1 = xindex // 396
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 396 * (x1 % 4) + 1600 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
tl.store(out_ptr1 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.050251890762960605
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 396), (396, 1))
assert_size_stride(primals_3, (4, 396), (396, 1))
assert_size_stride(primals_4, (4, 20), (20, 1))
assert_size_stride(primals_5, (20, 396), (396, 1))
assert_size_stride(primals_6, (20, 396), (396, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(48)](primals_1, buf0, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((12, 396), (396, 1), torch.float32)
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_1[grid(48)](primals_1, buf2, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf3 = empty_strided_cuda((12, 396), (396, 1), torch.float32)
extern_kernels.mm(buf2, primals_3, out=buf3)
del primals_3
buf4 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_4, primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_4, primals_6, out=buf5)
buf6 = empty_strided_cuda((4, 3, 4, 396), (4800, 1600, 396, 1),
torch.float32)
triton_poi_fused_mul_2[grid(19008)](buf1, buf3, buf4, buf6, 19008,
XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((1, 48, 396), (19008, 396, 1), torch.float32)
        buf10 = empty_strided_cuda((1, 396, 48), (19008, 1, 396), torch.float32)
triton_poi_fused_bmm_transpose_3[grid(19008)](buf6, buf7, buf10,
19008, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
buf8 = empty_strided_cuda((1, 48, 4), (192, 4, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf5, (1, 396, 4), (0,
1, 396), 0), out=buf8)
del buf7
buf9 = reinterpret_tensor(buf8, (4, 3, 4, 4), (48, 16, 4, 1), 0)
del buf8
        triton_poi_fused_div_4[grid(192)](buf9, 192, XBLOCK=256, num_warps=4, num_stages=1)
    return (buf9, buf1, buf3, buf4, buf10,
        reinterpret_tensor(buf5, (1, 4, 396), (396, 396, 1), 0),
        reinterpret_tensor(primals_4, (20, 4), (1, 20), 0),
        reinterpret_tensor(primals_6, (396, 20), (1, 396), 0),
        reinterpret_tensor(primals_5, (396, 20), (1, 396), 0),
        reinterpret_tensor(buf2, (4, 12), (1, 4), 0),
        reinterpret_tensor(buf0, (4, 12), (1, 4), 0))
class QuadriLinearScoreNew(nn.Module):
"""
Outer product version of quadrilinear function for sequence labeling.
"""
    def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396,
                 std=0.1545, window_size=1, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super(QuadriLinearScoreNew, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.window_size = window_size
self.normalization = normalization
        self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.temb_size))
self.T = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.U = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.V = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.W = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size),
                         b=math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T, std=self.std)
nn.init.normal_(self.U, std=self.std)
nn.init.normal_(self.V, std=self.std)
nn.init.normal_(self.W, std=self.std)
def forward(self, input_0):
primals_4 = self.tag_emd
primals_2 = self.T
primals_3 = self.U
primals_5 = self.V
primals_6 = self.W
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
Dadmatech/DadmaTools
|
QuadriLinearScore
| false
| 7,987
|
["Apache-2.0"] | 25
|
c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
https://github.com/Dadmatech/DadmaTools/tree/c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
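A shape-check sketch for the eager module above: with the default `window_size=1` the score drops one sentence position; the batch and length values here are arbitrary.

```python
import torch

score_fn = QuadriLinearScore(wemb_size=4, tagset_size=4)
word_emb = torch.rand(2, 5, 4)      # [batch, sent_length, wemb_size]
score = score_fn(word_emb)
assert score.shape == (2, 4, 4, 4)  # [batch, sent_length - window_size, tagset, tagset]
```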
GlobalAvgPool2dResNext
|
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class GlobalAvgPool2dResNext(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2dResNext, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
                            XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GlobalAvgPool2dResNextNew(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2dResNextNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Alibaba-MIIL/ZS_SDL
|
GlobalAvgPool2dResNext
| false
| 7,988
|
["MIT"] | 20
|
769fe4f57d2d458a7c4b5468a6395c9b296b1dad
|
https://github.com/Alibaba-MIIL/ZS_SDL/tree/769fe4f57d2d458a7c4b5468a6395c9b296b1dad
|
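The view-then-mean above is exactly a mean over the spatial dimensions; a one-line equivalence sketch:

```python
import torch

pool = GlobalAvgPool2dResNext()
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(pool(x), x.mean(dim=(2, 3)))
```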
MyUpsample2
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class MyUpsample2(nn.Module):
def forward(self, x):
        return x[:, :, :, None, :, None].expand(-1, -1, -1, 2, -1, 2).reshape(
            x.size(0), x.size(1), x.size(2) * 2, x.size(3) * 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 4
x3 = xindex // 16
x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0),
class MyUpsample2New(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Dou-Yiming/YouRefIt_ERU
|
MyUpsample2
| false
| 7,989
|
["MIT"] | 13
|
2a8e849380ed2d253c467b1af744a514bc171372
|
https://github.com/Dou-Yiming/YouRefIt_ERU/tree/2a8e849380ed2d253c467b1af744a514bc171372
|
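The expand/reshape trick above copies every pixel into a 2x2 block, i.e. nearest-neighbour 2x upsampling; a sketch checking that against `F.interpolate`:

```python
import torch
import torch.nn.functional as F

up = MyUpsample2()
x = torch.rand(1, 2, 3, 3)
assert torch.equal(up(x), F.interpolate(x, scale_factor=2.0, mode='nearest'))
```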
MLP
|
import torch
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
class SharedDropout(nn.Module):
def __init__(self, p=0.5, batch_first=True):
super(SharedDropout, self).__init__()
self.p = p
self.batch_first = batch_first
def extra_repr(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return s
def forward(self, x):
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p)
else:
mask = self.get_mask(x[0], self.p)
x *= mask.unsqueeze(1) if self.batch_first else mask
return x
@staticmethod
def get_mask(x, p):
mask = x.new_empty(x.shape).bernoulli_(1 - p)
mask = mask / (1 - p)
return mask
class MLP(nn.Module):
def __init__(self, n_in, n_hidden, dropout=0, identity=False):
super(MLP, self).__init__()
self.linear = nn.Linear(n_in, n_hidden)
self.identity = identity
if not self.identity:
self.activation = nn.LeakyReLU(negative_slope=0.1)
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
x = self.linear(x)
if not self.identity:
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class SharedDropout(nn.Module):
def __init__(self, p=0.5, batch_first=True):
super(SharedDropout, self).__init__()
self.p = p
self.batch_first = batch_first
def extra_repr(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return s
def forward(self, x):
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p)
else:
mask = self.get_mask(x[0], self.p)
x *= mask.unsqueeze(1) if self.batch_first else mask
return x
@staticmethod
def get_mask(x, p):
mask = x.new_empty(x.shape).bernoulli_(1 - p)
mask = mask / (1 - p)
return mask
class MLPNew(nn.Module):
def __init__(self, n_in, n_hidden, dropout=0, identity=False):
super(MLPNew, self).__init__()
self.linear = nn.Linear(n_in, n_hidden)
self.identity = identity
if not self.identity:
self.activation = nn.LeakyReLU(negative_slope=0.1)
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Dadmatech/DadmaTools
|
MLP
| false
| 7,990
|
["Apache-2.0"] | 25
|
c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
https://github.com/Dadmatech/DadmaTools/tree/c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
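A usage sketch; `eval()` makes `SharedDropout` a no-op so the output is deterministic (in training it would share one dropout mask across the sequence dimension).

```python
import torch

mlp = MLP(n_in=4, n_hidden=8, dropout=0.5)
mlp.eval()                   # SharedDropout only masks in training mode
x = torch.rand(2, 3, 4)
y = mlp(x)                   # linear -> LeakyReLU(0.1) -> (inactive) dropout
assert y.shape == (2, 3, 8)
```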
BiaffineAttention
|
import torch
import torch.utils.data.dataloader
from torch.nn import Parameter
import torch.nn as nn
import torch.nn
from torch.nn.parameter import Parameter
class BiaffineAttention(nn.Module):
"""
Adopted from NeuroNLP2:
https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py
Bi-Affine attention layer.
"""
    def __init__(self, input_size_encoder, input_size_decoder,
                 hidden_size=150, num_labels=1, biaffine=True, **kwargs):
"""
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
if apply bi-affine parameter.
**kwargs:
"""
super(BiaffineAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.hidden_size = hidden_size
        self.linear_encoder = torch.nn.Linear(self.input_size_encoder, self.hidden_size)
        self.linear_decoder = torch.nn.Linear(self.input_size_decoder, self.hidden_size)
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.b = Parameter(torch.Tensor(1, self.num_labels))
if self.biaffine:
            self.U = Parameter(torch.Tensor(self.num_labels, self.hidden_size, self.hidden_size))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.W_d)
nn.init.xavier_normal_(self.W_e)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_normal_(self.U)
def forward(self, input_s, input_t, mask_d=None, mask_e=None):
"""
Args:
input_s: Tensor
the student input tensor with shape = [batch, input_size]
input_t: Tensor
the teacher input tensor with shape = [batch, num_teachers, input_size]
mask_d: None
mask_e: None
Returns: Tensor
the energy tensor with shape = [batch, length]
"""
        assert input_s.size(0) == input_t.size(0), 'batch sizes of encoder and decoder are required to be equal.'
input_s.size()
_, _num_teachers, _ = input_t.size()
input_s = self.linear_encoder(input_s)
input_t = self.linear_decoder(input_t)
out_e = torch.matmul(self.W_e, input_s.transpose(1, 0)).transpose(1, 0)
out_d = torch.einsum('nd,bnd->bn', self.W_d, input_t)
if self.biaffine:
output = torch.einsum('bd,nde->bne', input_s, self.U)
output = torch.einsum('bne,bne->bn', output, input_t)
output = output + out_d + out_e + self.b
else:
            output = out_d + out_e + self.b
if mask_d is not None and mask_e is not None:
            output = output * mask_d.unsqueeze(1).unsqueeze(3) * mask_e.unsqueeze(1).unsqueeze(2)
output = torch.nn.functional.softmax(output, 1)
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size_encoder': 4, 'input_size_decoder': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data.dataloader
from torch.nn import Parameter
import torch.nn as nn
import torch.nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp5 = tl.load(in_ptr3 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp10 = tmp8 + tmp9
tmp11 = tmp10 + tmp3
tmp12 = tmp11 + tmp6
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp16 = tmp14 + tmp15
tmp17 = tmp16 + tmp3
tmp18 = tmp17 + tmp6
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp22 = tmp20 + tmp21
tmp23 = tmp22 + tmp3
tmp24 = tmp23 + tmp6
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tl.store(out_ptr0 + x0, tmp25, xmask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tl.store(in_out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (150, 4), (4, 1))
assert_size_stride(primals_4, (150,), (1,))
assert_size_stride(primals_5, (150, 4), (4, 1))
assert_size_stride(primals_6, (150,), (1,))
assert_size_stride(primals_7, (1, 150), (150, 1))
assert_size_stride(primals_8, (1, 150), (150, 1))
assert_size_stride(primals_9, (1, 150, 150), (22500, 150, 1))
assert_size_stride(primals_10, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 150), (150, 1), torch.float32)
extern_kernels.addmm(primals_4, primals_1, reinterpret_tensor(
primals_3, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 150), (150, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 150), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, reinterpret_tensor(buf0, (150, 4), (1,
150), 0), out=buf2)
buf3 = empty_strided_cuda((1, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_8, (1, 1, 150), (150,
150, 1), 0), reinterpret_tensor(buf1, (1, 150, 16), (0, 1, 150),
0), out=buf3)
buf4 = empty_strided_cuda((1, 4, 150), (600, 150, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 4, 150), (600, 150,
1), 0), primals_9, out=buf4)
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 150), (150, 150,
1), 0), reinterpret_tensor(buf1, (4, 150, 4), (600, 1, 150), 0),
out=buf5)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_0[grid(4)](buf5, buf3, buf2,
primals_10, buf6, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_add_1[grid(16)](buf7, buf5, buf2,
primals_10, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del buf6
del primals_10
buf8 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(16)](buf7, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf7
    return (buf8, primals_1, primals_7, buf0,
        reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf1, buf8,
        reinterpret_tensor(buf4, (4, 150, 1), (150, 1, 150), 0),
        reinterpret_tensor(primals_9, (1, 150, 150), (22500, 1, 150), 0),
        reinterpret_tensor(primals_8, (1, 150, 1), (150, 1, 150), 0))
class BiaffineAttentionNew(nn.Module):
"""
Adopted from NeuroNLP2:
https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py
Bi-Affine attention layer.
"""
    def __init__(self, input_size_encoder, input_size_decoder,
                 hidden_size=150, num_labels=1, biaffine=True, **kwargs):
"""
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
if apply bi-affine parameter.
**kwargs:
"""
super(BiaffineAttentionNew, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.hidden_size = hidden_size
        self.linear_encoder = torch.nn.Linear(self.input_size_encoder, self.hidden_size)
        self.linear_decoder = torch.nn.Linear(self.input_size_decoder, self.hidden_size)
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.b = Parameter(torch.Tensor(1, self.num_labels))
if self.biaffine:
            self.U = Parameter(torch.Tensor(self.num_labels, self.hidden_size, self.hidden_size))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.W_d)
nn.init.xavier_normal_(self.W_e)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_normal_(self.U)
def forward(self, input_0, input_1):
primals_7 = self.W_d
primals_8 = self.W_e
primals_10 = self.b
primals_9 = self.U
primals_3 = self.linear_encoder.weight
primals_4 = self.linear_encoder.bias
primals_5 = self.linear_decoder.weight
primals_6 = self.linear_decoder.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
Dadmatech/DadmaTools
|
BiaffineAttention
| false
| 7,991
|
["Apache-2.0"] | 25
|
c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
https://github.com/Dadmatech/DadmaTools/tree/c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
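A sketch of the attention weights over teachers; the output is already softmax-normalised over dimension 1. The einsums rely on `torch.einsum` broadcasting the size-1 `num_labels` axis against `num_teachers`, an assumption about the runtime version (the compiled trace above was built the same way).

```python
import torch

attn = BiaffineAttention(input_size_encoder=4, input_size_decoder=4)
student = torch.rand(3, 4)      # [batch, input_size_encoder]
teachers = torch.rand(3, 5, 4)  # [batch, num_teachers, input_size_decoder]
weights = attn(student, teachers)
assert weights.shape == (3, 5)
assert torch.allclose(weights.sum(dim=1), torch.ones(3))
```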
ShakeResNet
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
        return nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False),
            nn.BatchNorm2d(out_ch))
class ShakeResNet(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNet, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc(h)
return h
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'label': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (256, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (256,
16), (16, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
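# Implementation note: F.pad with negative padding crops one row/column and
# zero-pads on the opposite side, so the second 1x1-conv path sees the feature
# map shifted by one pixel before the strided pooling; concatenating both
# paths lets the strided shortcut draw on all spatial positions.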
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
class ShakeResNetNew(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNetNew, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
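# Minimal smoke test (illustrative; the Triton `call` path requires CUDA and
# pins the input to shape (4, 3, 64, 64)). With depth=1 no ShakeBlocks are
# created, so only c_in and fc are exercised; because the 64x64 input is only
# average-pooled by 8, the flattening view yields a (256, 4) output.
def _shake_resnet_smoke_test():
    model = ShakeResNetNew(depth=1, w_base=4, label=4).cuda()
    out = model(torch.rand(4, 3, 64, 64, device='cuda'))
    assert out.shape == (256, 4)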
|
DensoITLab/TeachAugment
|
ShakeResNet
| false
| 7,992
|
[
"BSD-2-Clause"
] | 20
|
66ec099a0afab99e18531c5437182cfe17dc30c8
|
https://github.com/DensoITLab/TeachAugment/tree/66ec099a0afab99e18531c5437182cfe17dc30c8
|
PT
|
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class PT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, epsilon, mode, K, alpha):
super(PT, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=True)
self.Linear2 = Linear(nhid, nclass, dropout, bias=True)
self.epsilon = epsilon
self.mode = mode
self.K = K
self.alpha = alpha
self.number_class = nclass
def forward(self, x):
x = torch.relu(self.Linear1(x))
return self.Linear2(x)
def loss_function(self, y_hat, y_soft, epoch=0):
if self.training:
y_hat_con = torch.detach(torch.softmax(y_hat, dim=-1))
exp = np.log(epoch / self.epsilon + 1)
if self.mode == 2:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1
), torch.mul(y_soft, y_hat_con ** exp))
) / self.number_class
elif self.mode == 1:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1
), torch.mul(y_soft, y_hat_con))) / self.number_class
else:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1
), y_soft)) / self.number_class
else:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1),
y_soft)) / self.number_class
return loss
def inference(self, h, adj):
y0 = torch.softmax(h, dim=-1)
y = y0
for _ in range(self.K):
y = (1 - self.alpha) * torch.matmul(adj, y) + self.alpha * y0
return y
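# Illustrative sketch (assumed helper, not from the repo): `inference`
# performs the APPNP-style fixed-point iteration
#     y_{k+1} = (1 - alpha) * adj @ y_k + alpha * y_0,
# interpolating between the softmax predictions y_0 and their repeatedly
# propagated neighborhood averages.
def _propagation_demo():
    adj = torch.full((2, 2), 0.5)                # toy normalized adjacency
    y0 = torch.eye(2)                            # one-hot "predictions"
    y = y0
    for _ in range(10):
        y = 0.9 * torch.matmul(adj, y) + 0.1 * y0
    return y  # rows drift toward the propagated average, anchored by y0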
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5,
'epsilon': 4, 'mode': 4, 'K': 4, 'alpha': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_4, out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(buf1, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), buf4, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class PTNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, epsilon, mode, K, alpha):
super(PTNew, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=True)
self.Linear2 = Linear(nhid, nclass, dropout, bias=True)
self.epsilon = epsilon
self.mode = mode
self.K = K
self.alpha = alpha
self.number_class = nclass
def loss_function(self, y_hat, y_soft, epoch=0):
if self.training:
y_hat_con = torch.detach(torch.softmax(y_hat, dim=-1))
exp = np.log(epoch / self.epsilon + 1)
if self.mode == 2:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1
), torch.mul(y_soft, y_hat_con ** exp))
) / self.number_class
elif self.mode == 1:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1
), torch.mul(y_soft, y_hat_con))) / self.number_class
else:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1
), y_soft)) / self.number_class
else:
loss = -torch.sum(torch.mul(torch.log_softmax(y_hat, dim=-1),
y_soft)) / self.number_class
return loss
def inference(self, h, adj):
y0 = torch.softmax(h, dim=-1)
y = y0
for _ in range(self.K):
y = (1 - self.alpha) * torch.matmul(adj, y) + self.alpha * y0
return y
def forward(self, input_0):
primals_2 = self.Linear1.weight
primals_3 = self.Linear1.bias
primals_4 = self.Linear2.weight
primals_5 = self.Linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
DongHande/PT_propagation_then_training
|
PT
| false
| 7,993
|
[
"MIT"
] | 21
|
3f346ff161d2a0b807e3c0269ad26a7266305cc3
|
https://github.com/DongHande/PT_propagation_then_training/tree/3f346ff161d2a0b807e3c0269ad26a7266305cc3
|
PoolReducer
|
import torch
from torch import nn
class PoolReducer(nn.Module):
def __init__(self, groups, pool_operation='max'):
super(PoolReducer, self).__init__()
if pool_operation == 'max':
self.pool = lambda x: x.max(2)[0]
elif pool_operation == 'average':
self.pool = lambda x: x.mean(2)
else:
raise ValueError('pool_operation must be in ["max","average"]')
def forward(self, x):
return self.pool(x)
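# Shape sketch (illustrative): both reductions collapse dimension 2 only.
def _pool_reducer_demo():
    x = torch.rand(2, 3, 5, 7)
    assert PoolReducer(groups=1, pool_operation='max')(x).shape == (2, 3, 7)
    assert PoolReducer(groups=1, pool_operation='average')(x).shape == (2, 3, 7)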
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'groups': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class PoolReducerNew(nn.Module):
def __init__(self, groups, pool_operation='max'):
super(PoolReducerNew, self).__init__()
if pool_operation == 'max':
self.pool = lambda x: x.max(2)[0]
elif pool_operation == 'average':
self.pool = lambda x: x.mean(2)
else:
raise ValueError('pool_operation must be in ["max","average"]')
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Dreem-Organization/RobustSleepNet
|
PoolReducer
| false
| 7,994
|
[
"MIT"
] | 16
|
c8ff3f6f857299eb2bf2e9400483084d5ecd4106
|
https://github.com/Dreem-Organization/RobustSleepNet/tree/c8ff3f6f857299eb2bf2e9400483084d5ecd4106
|
HyperSphereLoss
|
import torch
import torch.nn as nn
class HyperSphereLoss(nn.Module):
def forward(self, input):
"""
        Calculate the distance between the input and N (the north pole) using the hypersphere metric.
Woo Park, Sung, and Junseok Kwon.
"Sphere Generative Adversarial Network Based on Geometric Moment Matching."
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2019.
"""
q = self.project_to_hypersphere(input)
q_norm = torch.norm(q, dim=1) ** 2
loss = 2 * q[:, -1] / (1 + q_norm)
return torch.mean(torch.acos(loss))
def project_to_hypersphere(self, v):
v_norm = torch.norm(v, dim=1, keepdim=True) ** 2
a = 2 * v / (v_norm + 1)
b = (v_norm - 1) / (v_norm + 1)
return torch.cat([a, b], dim=1)
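# Quick numerical check (illustrative, not part of the repo): the inverse
# stereographic projection above maps any v in R^n onto the unit sphere S^n,
# so every projected point has norm 1 up to rounding.
def _sphere_projection_check():
    v = torch.randn(8, 4)
    q = HyperSphereLoss().project_to_hypersphere(v)
    assert torch.allclose(q.norm(dim=1), torch.ones(8), atol=1e-5)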
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 5
x0 = xindex % 16
x2 = xindex // 80
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 2.0
tmp7 = tmp5 * tmp6
tmp8 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp8 * tmp8
tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = tmp7 / tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp29 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = tmp29 * tmp29
tmp31 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = libdevice.sqrt(tmp39)
tmp41 = tmp40 * tmp40
tmp42 = tmp41 - tmp21
tmp43 = tmp41 + tmp21
tmp44 = tmp42 / tmp43
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp26, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp25, tmp46)
tl.store(out_ptr0 + x3, tmp47, xmask)
@triton.jit
def triton_per_fused_acos_add_div_linalg_vector_norm_mean_mul_pow_1(in_out_ptr0
, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (64 + r0 + 80 * r1), None)
tmp3 = tl.load(in_ptr0 + (r0 + 80 * r1), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + 80 * r1), None)
tmp8 = tl.load(in_ptr0 + (32 + r0 + 80 * r1), None)
tmp11 = tl.load(in_ptr0 + (48 + r0 + 80 * r1), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp0 * tmp0
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp16 * tmp16
tmp18 = 1.0
tmp19 = tmp17 + tmp18
tmp20 = tmp2 / tmp19
tmp21 = libdevice.acos(tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 64.0
tmp26 = tmp24 / tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(320)](arg0_1, buf0, 320, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_acos_add_div_linalg_vector_norm_mean_mul_pow_1[grid(1)
](buf2, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf2,
class HyperSphereLossNew(nn.Module):
def project_to_hypersphere(self, v):
v_norm = torch.norm(v, dim=1, keepdim=True) ** 2
a = 2 * v / (v_norm + 1)
b = (v_norm - 1) / (v_norm + 1)
return torch.cat([a, b], dim=1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Dotori-HJ/SphereGAN-Pytorch-implementation
|
HyperSphereLoss
| false
| 7,995
|
[
"MIT"
] | 11
|
fe7843545388ecdae34f374e7f1c42300ab12689
|
https://github.com/Dotori-HJ/SphereGAN-Pytorch-implementation/tree/fe7843545388ecdae34f374e7f1c42300ab12689
|
GCN
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=True)
self.Linear2 = Linear(nhid, nclass, dropout, bias=True)
def forward(self, x, adj):
x = torch.relu(self.Linear1(torch.matmul(adj, x)))
h = self.Linear2(torch.matmul(adj, x))
return torch.log_softmax(h, dim=-1)
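# Minimal usage sketch (illustrative; assumes a dense, already-normalized
# adjacency): each layer propagates features with `adj` and then applies the
# linear transform, and the head returns per-node log-probabilities.
def _gcn_demo():
    model = GCN(nfeat=4, nhid=8, nclass=3, dropout=0.0)
    x, adj = torch.rand(5, 4), torch.eye(5)    # 5 nodes, identity adjacency
    logp = model(x, adj)                       # (5, 3) log-probabilities
    assert torch.allclose(logp.exp().sum(-1), torch.ones(5), atol=1e-5)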
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + x0, tmp18, xmask)
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tl.store(in_out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf2,
primals_4, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
buf4 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
del buf2
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__log_softmax_add_1[grid(64)](buf4, primals_6, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__log_softmax_add_2[grid(256)](buf7, primals_6,
buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del buf6
del primals_6
return buf7, buf7, reinterpret_tensor(buf3, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (16, 4, 4), (16, 1, 4), 0
), buf8, reinterpret_tensor(buf0, (4, 64), (1, 4), 0)
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class GCNNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCNNew, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=True)
self.Linear2 = Linear(nhid, nclass, dropout, bias=True)
def forward(self, input_0, input_1):
primals_3 = self.Linear1.weight
primals_4 = self.Linear1.bias
primals_5 = self.Linear2.weight
primals_6 = self.Linear2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
DongHande/PT_propagation_then_training
|
GCN
| false
| 7,996
|
[
"MIT"
] | 21
|
3f346ff161d2a0b807e3c0269ad26a7266305cc3
|
https://github.com/DongHande/PT_propagation_then_training/tree/3f346ff161d2a0b807e3c0269ad26a7266305cc3
|
TransformerEncoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class TransformerEncoderLayer(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def forward(self, x, key_padding_mask=None, attn_mask=None):
residual = x
x = self.self_attn_layer_norm(x)
x, _att = self.self_attn(query=x, key=x, value=x, key_padding_mask=
key_padding_mask, attn_mask=attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
return x
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
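# Pre-norm residual layout, sketched (illustrative; assumes a PyTorch version
# whose MultiheadAttention accepts unbatched (seq_len, embed_dim) inputs):
#     x = x + Dropout(SelfAttn(LN(x)))
#     x = x + Dropout(FC2(ReLU(FC1(LN(x)))))
def _encoder_layer_demo():
    layer = TransformerEncoderLayer(embed_dim=4, num_heads=4, hidden_size=4)
    x = torch.rand(4, 4)
    assert layer(x).shape == x.shape    # residual connections preserve shape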
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(16)](buf17, primals_11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (4, 4), (1,
4), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, input_0):
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_2 = self.self_attn.out_proj.bias
primals_3 = self.self_attn_layer_norm.weight
primals_7 = self.self_attn_layer_norm.bias
primals_6 = self.fc1.weight
primals_8 = self.fc1.bias
primals_10 = self.fc2.weight
primals_9 = self.fc2.bias
primals_11 = self.layer_norm.weight
primals_13 = self.layer_norm.bias
primals_12 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
Daetheys/Lazimpa
|
TransformerEncoderLayer
| false
| 7,997
|
[
"MIT"
] | 15
|
21f4f4ebcdfa8b6a775b64673dd3001763c91cf1
|
https://github.com/Daetheys/Lazimpa/tree/21f4f4ebcdfa8b6a775b64673dd3001763c91cf1
|
SelfAttn
|
import torch
from torch import nn
from torch.nn import functional as F
class SelfAttn(nn.Module):
def __init__(self, denc, dropout=0, enc_trans=None, dec_trans=None):
super().__init__()
self.scorer = nn.Linear(denc, 1)
self.dropout = nn.Dropout(dropout)
self.enc_trans = enc_trans or nn.Identity()
def forward(self, enc, enc_mask):
enc_trans = self.dropout(self.enc_trans(enc))
raw_scores = self.scorer(enc).squeeze(2)
scores = F.softmax(raw_scores - (1 - enc_mask) * 1e+20, dim=1)
context = scores.unsqueeze(2).expand_as(enc_trans).sum(1)
return context, scores
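# The masking trick, isolated (illustrative helper): subtracting 1e20 at
# padded positions drives their logits toward -inf, so softmax assigns them
# numerically zero weight.
def _masked_softmax_demo():
    mask = torch.tensor([[1.0, 1.0, 0.0]])     # third position is padding
    probs = F.softmax(torch.zeros(1, 3) - (1 - mask) * 1e+20, dim=1)
    return probs                               # tensor([[0.5, 0.5, 0.0]])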
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'denc': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_mul_rsub_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = 1e+20
tmp8 = tmp6 * tmp7
tmp9 = tmp3 - tmp8
tmp11 = tmp10 + tmp2
tmp13 = tmp5 - tmp12
tmp14 = tmp13 * tmp7
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = tmp17 + tmp2
tmp20 = tmp5 - tmp19
tmp21 = tmp20 * tmp7
tmp22 = tmp18 - tmp21
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = tmp24 + tmp2
tmp27 = tmp5 - tmp26
tmp28 = tmp27 * tmp7
tmp29 = tmp25 - tmp28
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tmp9 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp15 - tmp30
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tmp36 = tmp22 - tmp30
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp35 + tmp37
tmp39 = tmp29 - tmp30
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tl.store(out_ptr0 + x0, tmp30, xmask)
tl.store(out_ptr1 + x0, tmp41, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_rsub_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = 1e+20
tmp8 = tmp6 * tmp7
tmp9 = tmp3 - tmp8
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp14 = tmp12 / tmp13
tl.store(in_out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mul_rsub_sub_0[grid(4)](buf0, primals_3,
primals_4, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_mul_rsub_sub_1[grid(16)](buf3, primals_3,
primals_4, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_3
del primals_4
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf4, buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf3
class SelfAttnNew(nn.Module):
def __init__(self, denc, dropout=0, enc_trans=None, dec_trans=None):
super().__init__()
self.scorer = nn.Linear(denc, 1)
self.dropout = nn.Dropout(dropout)
self.enc_trans = enc_trans or nn.Identity()
def forward(self, input_0, input_1):
primals_2 = self.scorer.weight
primals_3 = self.scorer.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
DreamerDeo/gazp
|
SelfAttn
| false
| 7,998
|
[
"MIT"
] | 18
|
5f823a447ffdf5176023a01516d2be7c383294d9
|
https://github.com/DreamerDeo/gazp/tree/5f823a447ffdf5176023a01516d2be7c383294d9
|
SpatialAttentionGate
|
import torch
from torch import nn
from torch.nn import functional as F
class SpatialAttentionGate(nn.Module):
def __init__(self, channel, reduction=16):
super(SpatialAttentionGate, self).__init__()
self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0)
self.fc2 = nn.Conv2d(reduction, 1, kernel_size=1, padding=0)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x, inplace=True)
x = self.fc2(x)
x = torch.sigmoid(x)
return x
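# Usage sketch (illustrative): the gate emits a (B, 1, H, W) map in (0, 1)
# that is typically broadcast-multiplied onto the input features.
def _spatial_gate_demo():
    gate = SpatialAttentionGate(channel=4)
    feats = torch.rand(2, 4, 8, 8)
    attn = gate(feats)
    assert attn.shape == (2, 1, 8, 8)
    assert attn.min() >= 0.0 and attn.max() <= 1.0
    return feats * attn                        # reweighted features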
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1024)](buf1, primals_2,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf3, primals_5,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf3
class SpatialAttentionGateNew(nn.Module):
def __init__(self, channel, reduction=16):
super(SpatialAttentionGateNew, self).__init__()
self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0)
self.fc2 = nn.Conv2d(reduction, 1, kernel_size=1, padding=0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Diyago/Automatic-salt-deposits-segmentation
|
SpatialAttentionGate
| false
| 7,999
|
[
"MIT"
] | 20
|
fedfc7f1d9878674382eeb16a820b5f16791f4ab
|
https://github.com/Diyago/Automatic-salt-deposits-segmentation/tree/fedfc7f1d9878674382eeb16a820b5f16791f4ab
|
Whitening2d
|
import torch
from torch import nn
from torch.cuda.amp import custom_fwd
from torch.nn.functional import conv2d
class Whitening2d(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
            output_dim (int): number of dimensions of the projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2d, self).__init__()
self.output_dim = output_dim
self.eps = eps
@custom_fwd(cast_inputs=torch.float32)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Performs whitening using the Cholesky decomposition.
Args:
x (torch.Tensor): a batch or slice of projected features.
Returns:
torch.Tensor: a batch or slice of whitened features.
"""
x = x.unsqueeze(2).unsqueeze(3)
m = x.mean(0).view(self.output_dim, -1).mean(-1).view(1, -1, 1, 1)
xn = x - m
T = xn.permute(1, 0, 2, 3).contiguous().view(self.output_dim, -1)
f_cov = torch.mm(T, T.permute(1, 0)) / (T.shape[-1] - 1)
eye = torch.eye(self.output_dim).type(f_cov.type())
f_cov_shrinked = (1 - self.eps) * f_cov + self.eps * eye
inv_sqrt = torch.triangular_solve(eye, torch.cholesky(
f_cov_shrinked), upper=False)[0]
inv_sqrt = inv_sqrt.contiguous().view(self.output_dim, self.
output_dim, 1, 1)
decorrelated = conv2d(xn, inv_sqrt)
return decorrelated.squeeze(2).squeeze(2)
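# Numerical check (illustrative; assumes a PyTorch version where the
# deprecated torch.cholesky / torch.triangular_solve are still available):
# whitening with the batch's own Cholesky factor makes the empirical
# covariance of the output the identity, up to rounding.
def _whitening_check():
    torch.manual_seed(0)
    x = torch.randn(512, 4) @ torch.randn(4, 4)      # correlated features
    y = Whitening2d(output_dim=4)(x)
    cov = y.t() @ y / (y.shape[0] - 1)               # y is already centered
    assert torch.allclose(cov, torch.eye(4), atol=1e-3)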
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = 1.0
tmp11 = tmp9 / tmp10
tmp12 = tmp0 - tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_view_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__to_copy_add_div_eye_mul_2(in_out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = x1
tmp6 = x0
tmp7 = tmp5 == tmp6
tmp8 = 0.0
tmp9 = tl.where(tmp7, tmp3, tmp8)
tmp10 = tmp9 * tmp8
tmp11 = tmp4 + tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_eye_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_clone_view_1[grid(4, 4)](buf0, buf1, 4, 4, XBLOCK=
4, YBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
del buf1
buf3 = buf2
del buf2
triton_poi_fused__to_copy_add_div_eye_mul_2[grid(16)](buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = torch.ops.aten.cholesky.default(buf3)
buf5 = buf4
del buf4
buf6 = buf3
del buf3
triton_poi_fused__to_copy_eye_3[grid(16)](buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = torch.ops.aten.triangular_solve.default(buf6, buf5, False)
del buf5
buf8 = buf7[0]
del buf7
buf10 = buf6
del buf6
triton_poi_fused_clone_view_1[grid(4, 4)](buf8, buf10, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
del buf8
buf11 = extern_kernels.convolution(buf0, reinterpret_tensor(buf10,
(4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 1, 1), (4, 1, 1, 1))
del buf0
del buf10
return reinterpret_tensor(buf11, (4, 4), (4, 1), 0),
class Whitening2dNew(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
            output_dim (int): number of dimensions of the projected features.
            eps (float, optional): epsilon for numerical stability in the Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2dNew, self).__init__()
self.output_dim = output_dim
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
DonkeyShot21/cassle
|
Whitening2d
| false
| 8,000
|
[
"MIT"
] | 13
|
d25f9c7cb5e822660dc1ef03e7fac09a33d0b1a8
|
https://github.com/DonkeyShot21/cassle/tree/d25f9c7cb5e822660dc1ef03e7fac09a33d0b1a8
|
GBlock
|
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
def l2_norm(v, eps=1e-10):
"""
L2 normalization
    :param v: input tensor
    :param eps: small constant added to the norm for numerical stability
    :return: v rescaled to (approximately) unit L2 norm
"""
return v / (v.norm() + eps)
def pixel_norm(x, eps=1e-10):
"""
Pixel normalization
    :param x: feature map of shape (N, C, H, W)
    :param eps: small constant for numerical stability
    :return: x normalized to unit mean square over the channel dimension
"""
return x * torch.rsqrt(torch.mean(torch.pow(x, 2), dim=1, keepdim=True) +
eps)
class SpectralNorm(nn.Module):
"""
    Spectral Normalization: rescales the wrapped module's weight by an
    estimate of its largest singular value, obtained via power iteration.
"""
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2_norm(torch.mv(torch.t(w.view(height, -1).data), u.data)
)
u.data = l2_norm(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
del u, v, w
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2_norm(u.data)
v.data = l2_norm(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
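# Hedged sketch (added; not from the repo): one step of the power iteration
# performed by _update_u_v above, written out for a plain 2-D weight matrix.
# Repeated steps drive u and v toward the top singular vectors, so
# sigma = u . (W v) approximates the spectral norm the weight is divided by.
def power_iteration_step(w, u, eps=1e-10):
    v = l2_norm(torch.mv(torch.t(w), u), eps)
    u = l2_norm(torch.mv(w, v), eps)
    sigma = torch.dot(u, torch.mv(w, v))
    return u, v, sigma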
class GBlock(nn.Module):
def __init__(self, in_channels, out_channels, activation=nn.ReLU(
inplace=False), upsample=functools.partial(F.interpolate,
scale_factor=2)):
super(GBlock, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
self.activation = activation
self.upsample = upsample
self.conv1 = SpectralNorm(nn.Conv2d(self.in_channels, self.
out_channels, kernel_size=3, padding=1))
self.conv2 = SpectralNorm(nn.Conv2d(self.out_channels, self.
out_channels, kernel_size=3, padding=1))
self.learnable_sc = in_channels != out_channels or upsample
if self.learnable_sc:
self.conv_sc = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size=1, padding=0))
self.upsample = upsample
def forward(self, x):
h = pixel_norm(x)
if self.upsample:
h = self.upsample(h)
x = self.upsample(x)
h = self.conv1(h)
h = self.activation(pixel_norm(h))
h = self.conv2(h)
if self.learnable_sc:
x = self.conv_sc(x)
return h + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
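# Hedged smoke test (added; not from the repo): with the default upsample,
# GBlock doubles the spatial size, so a (4, 4, 4, 4) input maps to (4, 4, 8, 8).
def _gblock_smoke_test():
    block = GBlock(4, 4)
    out = block(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 8, 8)
    return out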
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import functools
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_add_mean_mul_pow_rsqrt_0(in_ptr0,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x4 = xindex // 64
x3 = xindex // 256
x7 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp11 = tmp10 * tmp10
tmp12 = tl.load(in_ptr0 + (16 + tmp8 + 4 * tmp4 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = tl.load(in_ptr0 + (32 + tmp8 + 4 * tmp4 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tl.load(in_ptr0 + (48 + tmp8 + 4 * tmp4 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = 4.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-10
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp9 * tmp25
tl.store(out_ptr0 + x7, tmp26, xmask)
tl.store(out_ptr1 + x7, tmp9, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.load(in_ptr0 + (36 + r0), rmask, other=0.0)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + (72 + r0), rmask, other=0.0)
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (108 + r0), rmask, other=0.0)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(rmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-10
tmp26 = tmp24 + tmp25
tmp27 = tmp18 / tmp26
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, rmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp27, rmask)
@triton.jit
def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 36 * x0), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4 = tmp1 / tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask & xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp6 = 1e-10
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None)
@triton.jit
def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_relu_rsqrt_7(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = xindex // 256
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-10
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tl.store(out_ptr0 + x3, tmp19, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mv_8(in_ptr0, in_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.load(in_ptr0 + (4 + r0), None)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + (8 + r0), None)
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (12 + r0), None)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-10
tmp25 = tmp23 + tmp24
tmp26 = tmp18 / tmp25
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None)
@triton.jit
def triton_per_fused_add_div_dot_linalg_vector_norm_mv_9(in_ptr0, in_ptr1,
out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-10
tmp25 = tmp23 + tmp24
tmp26 = tmp18 / tmp25
tmp27 = tmp26 * tmp18
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None)
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)
@triton.jit
def triton_poi_fused_div_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_convolution_11(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (36,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (36,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_add_mean_mul_pow_rsqrt_0[grid(1024)](
primals_1, buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((36,), (1,), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
buf36 = empty_strided_cuda((36,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf4,
primals_4, primals_2, buf2, buf36, 1, 36, XBLOCK=1, num_warps=2,
num_stages=1)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_div_mv_2[grid(4)](primals_4, buf2, buf4, buf5, 4,
36, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf5, buf7,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf8 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_dot_4[grid(1)](buf7, buf5, buf8, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_div_5[grid(144)](primals_4, buf8, buf9, 144,
XBLOCK=128, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf0, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 8, 8), (256, 64, 8, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_6[grid(1024)](buf11, primals_5, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf12 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32
)
triton_poi_fused_add_mean_mul_pow_relu_rsqrt_7[grid(1024)](buf11,
buf12, 1024, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = empty_strided_cuda((36,), (1,), torch.float32)
buf14 = empty_strided_cuda((), (), torch.float32)
buf15 = buf14
del buf14
buf45 = empty_strided_cuda((36,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf15,
primals_8, primals_6, buf13, buf45, 1, 36, XBLOCK=1, num_warps=
2, num_stages=1)
buf16 = buf5
del buf5
triton_per_fused_div_mv_2[grid(4)](primals_8, buf13, buf15, buf16,
4, 36, XBLOCK=1, num_warps=2, num_stages=1)
buf18 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf16, buf18,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf19 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_dot_4[grid(1)](buf18, buf16, buf19, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_div_5[grid(144)](primals_8, buf19, buf20, 144,
XBLOCK=128, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf12, buf20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 4, 8, 8), (256, 64, 8, 1))
buf24 = buf16
del buf16
triton_per_fused_add_div_linalg_vector_norm_mv_8[grid(1)](primals_12,
primals_10, buf24, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf27 = empty_strided_cuda((), (), torch.float32)
buf49 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_div_dot_linalg_vector_norm_mv_9[grid(1)](
primals_12, buf24, buf27, buf49, 1, 4, XBLOCK=1, num_warps=2,
num_stages=1)
buf28 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_div_10[grid(16)](primals_12, buf27, buf28, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf27
buf29 = extern_kernels.convolution(buf1, buf28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 4, 8, 8), (256, 64, 8, 1))
buf30 = buf21
del buf21
triton_poi_fused_add_convolution_11[grid(1024)](buf30, primals_9,
buf29, primals_13, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf29
del primals_13
del primals_9
buf31 = torch.ops.aten.set_.source_Tensor(primals_2, buf7)
assert_size_stride(buf31, (4,), (1,))
del buf2
buf37 = torch.ops.aten.set_.source_Tensor(primals_3, buf36)
assert_size_stride(buf37, (36,), (1,))
del primals_3
buf40 = torch.ops.aten.set_.source_Tensor(primals_6, buf18)
assert_size_stride(buf40, (4,), (1,))
del buf13
buf46 = torch.ops.aten.set_.source_Tensor(primals_7, buf45)
assert_size_stride(buf46, (36,), (1,))
del primals_7
buf50 = torch.ops.aten.set_.source_Tensor(primals_10, buf49)
assert_size_stride(buf50, (4,), (1,))
del primals_10
buf54 = torch.ops.aten.set_.source_Tensor(primals_11, buf24)
assert_size_stride(buf54, (4,), (1,))
del primals_11
return (buf30, buf9, buf20, buf28, primals_2, primals_4, primals_6,
primals_8, primals_12, buf0, buf1, buf4, buf7, buf8, buf9, buf11,
buf12, buf15, buf18, buf19, buf20, buf24, buf28)
def l2_norm(v, eps=1e-10):
"""
L2 normalization
    :param v: input tensor
    :param eps: small constant added to the norm for numerical stability
    :return: v rescaled to (approximately) unit L2 norm
"""
return v / (v.norm() + eps)
def pixel_norm(x, eps=1e-10):
"""
Pixel normalization
    :param x: feature map of shape (N, C, H, W)
    :param eps: small constant for numerical stability
    :return: x normalized to unit mean square over the channel dimension
"""
return x * torch.rsqrt(torch.mean(torch.pow(x, 2), dim=1, keepdim=True) +
eps)
class SpectralNorm(nn.Module):
"""
    Spectral Normalization: rescales the wrapped module's weight by an
    estimate of its largest singular value, obtained via power iteration.
"""
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2_norm(torch.mv(torch.t(w.view(height, -1).data), u.data)
)
u.data = l2_norm(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
del u, v, w
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2_norm(u.data)
v.data = l2_norm(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
class GBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, activation=nn.ReLU(
inplace=False), upsample=functools.partial(F.interpolate,
scale_factor=2)):
super(GBlockNew, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
self.activation = activation
self.upsample = upsample
self.conv1 = SpectralNorm(nn.Conv2d(self.in_channels, self.
out_channels, kernel_size=3, padding=1))
self.conv2 = SpectralNorm(nn.Conv2d(self.out_channels, self.
out_channels, kernel_size=3, padding=1))
self.learnable_sc = in_channels != out_channels or upsample
if self.learnable_sc:
self.conv_sc = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size=1, padding=0))
self.upsample = upsample
def forward(self, input_0):
primals_2 = self.conv1.module.bias
primals_5 = self.conv1.module.weight_u
primals_3 = self.conv1.module.weight_v
primals_4 = self.conv1.module.weight_bar
primals_6 = self.conv2.module.bias
primals_9 = self.conv2.module.weight_u
primals_7 = self.conv2.module.weight_v
primals_8 = self.conv2.module.weight_bar
primals_10 = self.conv_sc.module.bias
primals_11 = self.conv_sc.module.weight_u
primals_13 = self.conv_sc.module.weight_v
primals_12 = self.conv_sc.module.weight_bar
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
Dong-Jie-Chen/GS-WGAN
|
GBlock
| false
| 8,001
|
[
"MIT"
] | 32
|
5f33f21249431e53f44167da3ae7587e0dc695d9
|
https://github.com/Dong-Jie-Chen/GS-WGAN/tree/5f33f21249431e53f44167da3ae7587e0dc695d9
|
HardSigmoid
|
import torch
from torch import nn
class HardSigmoid(nn.Module):
def __init__(self, slope=0.1666667, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, input):
        return torch.where(
            input >= 3,
            torch.tensor(1.0, dtype=torch.float).type_as(input),
            torch.where(
                input <= -3,
                torch.tensor(0.0, dtype=torch.float).type_as(input),
                torch.add(torch.mul(input, self.slope), self.offset)))
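# Hedged numeric check (added; not from the repo): the default slope is
# roughly 1/6 with offset 0.5, so this piecewise map agrees with
# torch.nn.Hardsigmoid at and beyond the +/-3 saturation points.
def _hard_sigmoid_check():
    hs = HardSigmoid()
    x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
    assert torch.allclose(hs(x), nn.Hardsigmoid()(x), atol=1e-6)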
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_ge_le_mul_where_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 >= tmp1
tmp3 = -3.0
tmp4 = tmp0 <= tmp3
tmp5 = 0.1666667
tmp6 = tmp0 * tmp5
tmp7 = 0.5
tmp8 = tmp6 + tmp7
tmp9 = 0.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = 1.0
tmp12 = tl.where(tmp2, tmp11, tmp10)
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_add_ge_le_mul_where_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HardSigmoidNew(nn.Module):
def __init__(self, slope=0.1666667, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
DocYard-ai/UCR
|
HardSigmoid
| false
| 8,002
|
[
"Apache-2.0"
] | 10
|
7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
https://github.com/DocYard-ai/UCR/tree/7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
PixelNorm
|
import torch
from torch import nn
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=
True) + 1e-08)
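# Hedged sanity check (added; not from the repo): after pixel normalization,
# each spatial position has approximately unit mean square across channels.
def _pixel_norm_check():
    out = PixelNorm()(torch.rand(4, 4, 4, 4))
    ms = (out ** 2).mean(dim=1)
    assert torch.allclose(ms, torch.ones_like(ms), atol=1e-3)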
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PixelNormNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Dolorousrtur/style-people
|
PixelNorm
| false
| 8,003
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
MAECriterion
|
import torch
class MAECriterion(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, target):
return torch.mean(torch.abs(pred - target))
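# Hedged note (added; not from the repo): this criterion is numerically
# identical to torch.nn.L1Loss with its default 'mean' reduction.
def _mae_equals_l1loss():
    pred, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    assert torch.allclose(MAECriterion()(pred, target),
                          torch.nn.L1Loss()(pred, target))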
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MAECriterionNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Dolorousrtur/style-people
|
MAECriterion
| false
| 8,004
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
StackingNNet
|
import torch
import numpy as np
import torch.nn as nn
from torch.nn import CrossEntropyLoss
import torch.utils.data.distributed
import torch.utils.data
def gelu(x):
return 0.5 * x * (1 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
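# Hedged check (added; not from the repo): this is the tanh approximation of
# GELU; PyTorch >= 1.12 exposes the same formula as
# nn.functional.gelu(x, approximate='tanh').
def _gelu_matches_tanh_approximation():
    x = torch.randn(8)
    assert torch.allclose(gelu(x), nn.functional.gelu(x, approximate='tanh'),
                          atol=1e-6)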
class StackingNNet(nn.Module):
def __init__(self, input_size, output_size):
super(StackingNNet, self).__init__()
self.fc1 = nn.Linear(input_size, output_size)
def forward(self, x, labels=None):
x = self.fc1(x)
outputs = gelu(x),
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(outputs[0], labels)
outputs = (loss,) + outputs
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
import torch.utils.data.distributed
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
def gelu(x):
return 0.5 * x * (1 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class StackingNNetNew(nn.Module):
def __init__(self, input_size, output_size):
super(StackingNNetNew, self).__init__()
self.fc1 = nn.Linear(input_size, output_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ECNU-ICA/ECNU-SenseMaker
|
StackingNNet
| false
| 8,005
|
[
"MIT"
] | 16
|
24f829c3dfefccea5fecbbe75904858ec1fefffb
|
https://github.com/ECNU-ICA/ECNU-SenseMaker/tree/24f829c3dfefccea5fecbbe75904858ec1fefffb
|
CosineSimilarityLoss
|
import torch
from torch import nn
class CosineSimilarityLoss(nn.Module):
def __init__(self):
super(CosineSimilarityLoss, self).__init__()
def forward(self, x1, x2):
return 0.5 - 0.5 * torch.cosine_similarity(x1, x2)
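# Hedged check (added; not from the repo): cosine similarity spans [-1, 1],
# so identical inputs give a loss of 0 and opposite inputs give 1; the loss
# is returned per position along dim 1, not reduced to a scalar.
def _cosine_loss_extremes():
    loss = CosineSimilarityLoss()
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(loss(x, x), torch.zeros(4, 4, 4), atol=1e-6)
    assert torch.allclose(loss(x, -x), torch.ones(4, 4, 4), atol=1e-6)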
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x3, xmask)
tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused_mul_rsub_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp7 - tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_rsub_sum_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return buf1,
class CosineSimilarityLossNew(nn.Module):
def __init__(self):
super(CosineSimilarityLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
DunZhang/KnowledgeDistillation
|
CosineSimilarityLoss
| false
| 8,006
|
[
"MIT"
] | 31
|
47a9dd0f51021001b53e3a76c9347eb3131f1f72
|
https://github.com/DunZhang/KnowledgeDistillation/tree/47a9dd0f51021001b53e3a76c9347eb3131f1f72
|
SEModule
|
import torch
from torch import nn
class SEModule(nn.Module):
def __init__(self, in_channels, reduction=4):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
in_channels // reduction, kernel_size=1, stride=1, padding=0)
self.conv2 = nn.Conv2d(in_channels=in_channels // reduction,
out_channels=in_channels, kernel_size=1, stride=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.hardsigmoid = nn.Hardsigmoid(inplace=True)
def forward(self, inputs):
outputs = self.avg_pool(inputs)
outputs = self.conv1(outputs)
outputs = self.relu(outputs)
outputs = self.conv2(outputs)
outputs = self.hardsigmoid(outputs)
return inputs * outputs
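# Hedged shape walk-through (added; not from the repo): the squeeze step pools
# to (N, C, 1, 1), the two 1x1 convs form the C -> C // reduction -> C
# bottleneck, and the hard-sigmoid gate rescales the input channel-wise.
def _se_module_shapes():
    se = SEModule(in_channels=4, reduction=4)
    x = torch.rand(2, 4, 8, 8)
    assert se(x).shape == x.shape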
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_hardsigmoid_mul_2[grid(256)](primals_1,
buf4, primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_hardsigmoid_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
class SEModuleNew(nn.Module):
def __init__(self, in_channels, reduction=4):
super(SEModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
in_channels // reduction, kernel_size=1, stride=1, padding=0)
self.conv2 = nn.Conv2d(in_channels=in_channels // reduction,
out_channels=in_channels, kernel_size=1, stride=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.hardsigmoid = nn.Hardsigmoid(inplace=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
DocYard-ai/UCR
|
SEModule
| false
| 8,007
|
[
"Apache-2.0"
] | 10
|
7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
https://github.com/DocYard-ai/UCR/tree/7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
ModulatedConv2d
|
from torch.autograd import Function
import math
import torch
from torch import nn
import torch.nn.functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
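# Note (added): `fused` here and `upfirdn2d_op` below refer to StyleGAN2's
# compiled CUDA extensions (fused_bias_act / upfirdn2d); the original repo
# loads them elsewhere, so they are not importable from this snippet alone.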
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
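# Hedged example (added; not from the repo): a 1-D blur kernel expands to its
# normalized 2-D outer product, e.g. [1, 3, 3, 1] becomes a 4x4 kernel whose
# entries sum to 1.
def _make_kernel_example():
    k = make_kernel([1, 3, 3, 1])
    assert k.shape == (4, 4)
    assert torch.allclose(k.sum(), torch.tensor(1.0))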
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0],
pad[1], pad[0], pad[1]))
return out
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
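# Hedged check (added; not from the repo): EqualLinear follows the equalized
# learning-rate trick from ProGAN/StyleGAN; weights are stored at unit scale
# and multiplied by lr_mul / sqrt(fan_in) at run time.
def _equal_linear_scale_check():
    layer = EqualLinear(512, 256)
    assert abs(layer.scale - 1 / math.sqrt(512)) < 1e-12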
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
if input.dtype != kernel.dtype:
input = input.type(kernel.dtype)
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
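# Hedged helper (added; not from the repo): the output-size formula used in
# UpFirDn2d.forward above, standalone. Upsampling inserts up - 1 zeros
# between samples, padding extends the signal, and the FIR kernel plus
# downsampling determine the final length.
def upfirdn_out_len(in_len, up, down, pad0, pad1, kernel_len):
    return (in_len * up + pad0 + pad1 - kernel_len) // down + 1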
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style)
style = style.view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
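# Hedged sketch (added; not from the repo) of the modulate/demodulate step in
# forward above: each sample's style vector scales the weight's input
# channels, then demodulation renormalizes per output channel so feature maps
# keep roughly unit variance (the StyleGAN2 formulation).
def _modulate_demodulate(weight, style, scale, eps=1e-08):
    # weight: (1, O, I, K, K); style: (B, 1, I, 1, 1) per-sample channel gains
    w = scale * weight * style
    demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + eps)
    return w * demod.view(*demod.shape, 1, 1, 1)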
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4,
'style_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r5 = rindex
x0 = xindex % 4
r3 = rindex // 16
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp4 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp12, xmask)
tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0
del buf0
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5,
buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4,
4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0
), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16,
4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16,
4, 4), (256, 16, 4, 1), 0)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
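def _make_kernel_demo():
    """Editor's example (hypothetical helper, not part of the original code):
    make_kernel turns the 1-D tap list [1, 3, 3, 1] into a normalized 2-D
    binomial blur kernel via an outer product; the taps sum to 8, so the 2-D
    kernel sums to 64 before normalization and its corner entry is 1/64."""
    k = make_kernel([1, 3, 3, 1])
    assert k.shape == (4, 4)
    assert abs(k.sum().item() - 1.0) < 1e-06
    assert abs(k[0, 0].item() - 1.0 / 64) < 1e-06
    return k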
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0],
pad[1], pad[0], pad[1]))
return out
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
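def _equal_linear_scale_sketch():
    """Editor's sketch (hypothetical helper, not part of the original code):
    EqualLinear stores its weight divided by lr_mul and rescales it at runtime
    by scale = 1 / sqrt(in_dim) * lr_mul (the equalized-learning-rate trick
    from StyleGAN), so the effective weight std stays near 1 / sqrt(in_dim)
    for any lr_mul."""
    layer = EqualLinear(512, 256, lr_mul=0.01)
    eff_std = (layer.weight * layer.scale).std().item()
    assert abs(eff_std - 1 / math.sqrt(512)) < 0.005
    return eff_std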
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
if input.dtype != kernel.dtype:
input = input.type(kernel.dtype)
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
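def _upfirdn2d_shape_sketch():
    """Editor's note (hypothetical helper, not part of the original code):
    the output size computed in UpFirDn2d.forward follows
    out = (in * up + pad0 + pad1 - kernel) // down + 1; e.g. upsampling a
    16-pixel axis by 2 with a 4-tap kernel and pad = (2, 1) yields 32."""
    in_h, up, down, pad0, pad1, k = 16, 2, 1, 2, 1, 4
    out_h = (in_h * up + pad0 + pad1 - k) // down + 1
    assert out_h == 32
    return out_h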
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2dNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input_0, input_1):
primals_5 = self.weight
primals_2 = self.modulation.weight
primals_3 = self.modulation.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Dolorousrtur/style-people
|
ModulatedConv2d
| false
| 8,008
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
HexaLinearScore
|
import math
import torch
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
class HexaLinearScore(nn.Module):
"""
Outer product version of hexalinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
            std: standard deviation used to initialize the weight tensors
"""
super(HexaLinearScore, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.W1 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W2 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W3 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.T1 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T2 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T3 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size),
            b=math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T1, std=self.std)
nn.init.normal_(self.T2, std=self.std)
nn.init.normal_(self.T3, std=self.std)
nn.init.normal_(self.W1, std=self.std)
nn.init.normal_(self.W2, std=self.std)
nn.init.normal_(self.W3, std=self.std)
def forward(self, word_emb):
"""
Args:
word_emb: [batch, sent_length, wemb_size]
Returns: Tensor
            [batch, sent_length - 2, tagset_size, tagset_size, tagset_size]
"""
        assert word_emb.size(2
            ) == self.wemb_size, 'the last dimension of word_emb must equal wemb_size.'
g1 = torch.matmul(word_emb[:, :-2], self.W1)
g2 = torch.matmul(word_emb[:, 1:-1], self.W2)
g3 = torch.matmul(word_emb[:, 2:], self.W3)
g4 = torch.matmul(self.tag_emd, self.T1)
g5 = torch.matmul(self.tag_emd, self.T2)
g6 = torch.matmul(self.tag_emd, self.T3)
temp01 = g1 * g2 * g3
temp02 = torch.einsum('ak,bk,ck->abck', [g4, g5, g6])
score = torch.einsum('nmk,abck->nmabc', [temp01, temp02])
if self.normalization:
score = score / math.sqrt(self.rank)
return score
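def _hexalinear_shape_demo():
    """Editor's example (hypothetical helper, not part of the original code):
    the score couples three adjacent words with three tags through a shared
    rank-k decomposition, so the output tensor is 5-dimensional and two
    positions shorter than the input sentence."""
    m = HexaLinearScore(wemb_size=8, tagset_size=5, temb_size=4, rank=16)
    score = m(torch.rand(2, 10, 8))
    assert score.shape == (2, 8, 5, 5, 5)
    return score.shape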
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'wemb_size': 4, 'tagset_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3168
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex % 1584
x0 = xindex % 396
x3 = xindex // 6336
x2 = xindex // 1584 % 4
x4 = xindex // 1584
tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 396 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 396 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x5 + 1600 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_bmm_transpose_5(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x1 = xindex // 396
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 396 * (x1 % 4) + 1600 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
tl.store(out_ptr1 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_6(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.050251890762960605
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 396), (396, 1))
assert_size_stride(primals_3, (4, 396), (396, 1))
assert_size_stride(primals_4, (4, 396), (396, 1))
assert_size_stride(primals_5, (4, 20), (20, 1))
assert_size_stride(primals_6, (20, 396), (396, 1))
assert_size_stride(primals_7, (20, 396), (396, 1))
assert_size_stride(primals_8, (20, 396), (396, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(32)](primals_1, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_1[grid(32)](primals_1, buf2, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
extern_kernels.mm(buf2, primals_3, out=buf3)
del primals_3
buf4 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_2[grid(32)](primals_1, buf4, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf5 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
extern_kernels.mm(buf4, primals_4, out=buf5)
del primals_4
buf6 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_5, primals_6, out=buf6)
buf7 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_5, primals_7, out=buf7)
buf8 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_5, primals_8, out=buf8)
buf9 = empty_strided_cuda((4, 2, 396), (792, 396, 1), torch.float32)
triton_poi_fused_mul_3[grid(3168)](buf1, buf3, buf5, buf9, 3168,
XBLOCK=256, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4, 396), (6400, 1600, 396, 1),
torch.float32)
triton_poi_fused_mul_4[grid(25344)](buf8, buf6, buf7, buf10, 25344,
XBLOCK=256, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((1, 396, 64), (25344, 1, 396), torch.float32
)
buf14 = empty_strided_cuda((1, 64, 396), (25344, 396, 1), torch.float32
)
triton_poi_fused_bmm_transpose_5[grid(25344)](buf10, buf11, buf14,
25344, XBLOCK=256, num_warps=4, num_stages=1)
del buf10
buf12 = empty_strided_cuda((1, 8, 64), (512, 64, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf9, (1, 8, 396), (0, 396, 1
), 0), buf11, out=buf12)
del buf11
buf13 = reinterpret_tensor(buf12, (4, 2, 4, 4, 4), (128, 64, 16, 4,
1), 0)
del buf12
triton_poi_fused_div_6[grid(512)](buf13, 512, XBLOCK=128, num_warps
=4, num_stages=1)
return buf13, buf1, buf3, buf5, buf6, buf7, buf8, reinterpret_tensor(buf9,
(1, 396, 8), (3168, 1, 396), 0), buf14, reinterpret_tensor(primals_5,
(20, 4), (1, 20), 0), reinterpret_tensor(primals_8, (396, 20), (1,
396), 0), reinterpret_tensor(primals_7, (396, 20), (1, 396), 0
), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0
), reinterpret_tensor(buf4, (4, 8), (1, 4), 0), reinterpret_tensor(buf2
, (4, 8), (1, 4), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0)
class HexaLinearScoreNew(nn.Module):
"""
Outer product version of hexalinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
            std: standard deviation used to initialize the weight tensors
"""
super(HexaLinearScoreNew, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.W1 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W2 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W3 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.T1 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T2 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T3 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size),
            b=math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T1, std=self.std)
nn.init.normal_(self.T2, std=self.std)
nn.init.normal_(self.T3, std=self.std)
nn.init.normal_(self.W1, std=self.std)
nn.init.normal_(self.W2, std=self.std)
nn.init.normal_(self.W3, std=self.std)
def forward(self, input_0):
primals_5 = self.tag_emd
primals_2 = self.W1
primals_3 = self.W2
primals_4 = self.W3
primals_6 = self.T1
primals_7 = self.T2
primals_8 = self.T3
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
Dadmatech/DadmaTools
|
HexaLinearScore
| false
| 8,009
|
[
"Apache-2.0"
] | 25
|
c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
https://github.com/Dadmatech/DadmaTools/tree/c1b7add5c33544f69c1ba1c5250a5ea07caf9aa2
|
FFN
|
import torch
from torch import nn
import torch.nn.functional as F
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = nn.Linear(in_features=d_model, out_features=d_inner_hid)
self.fc2 = nn.Linear(in_features=d_inner_hid, out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate, training=False)
out = self.fc2(hidden)
return out
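def _ffn_demo():
    """Editor's usage example (hypothetical helper, not part of the original
    code): the FFN is position-wise, so it accepts any leading shape whose
    last dimension is d_model and preserves that shape."""
    ffn = FFN(d_inner_hid=2048, d_model=512, dropout_rate=0.1)
    y = ffn(torch.rand(4, 16, 512))
    assert y.shape == (4, 16, 512)
    return y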
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_inner_hid': 4, 'd_model': 4, 'dropout_rate': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class FFNNew(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFNNew, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = nn.Linear(in_features=d_model, out_features=d_inner_hid)
self.fc2 = nn.Linear(in_features=d_inner_hid, out_features=d_model)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
DocYard-ai/UCR
|
FFN
| false
| 8,010
|
[
"Apache-2.0"
] | 10
|
7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
https://github.com/DocYard-ai/UCR/tree/7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
AdaptiveConcatPool2d
|
import torch
from torch import nn
import torch.optim
class AdaptiveConcatPool2d(nn.Module):
def __init__(self, sz=1):
super().__init__()
self.output_size = sz or 1
self.ap = nn.AdaptiveAvgPool2d(self.output_size)
self.mp = nn.AdaptiveMaxPool2d(self.output_size)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
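def _concat_pool_demo():
    """Editor's example (hypothetical helper, not part of the original code):
    concatenating max- and avg-pooled maps doubles the channel count, so a
    head following this layer must expect 2 * n_channels features."""
    pool = AdaptiveConcatPool2d()
    out = pool(torch.rand(4, 64, 7, 7))
    assert out.shape == (4, 128, 1, 1)
    return out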
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0)
get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4)
triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return buf3,
class AdaptiveConcatPool2dNew(nn.Module):
def __init__(self, sz=1):
super().__init__()
self.output_size = sz or 1
self.ap = nn.AdaptiveAvgPool2d(self.output_size)
self.mp = nn.AdaptiveMaxPool2d(self.output_size)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
DrHB/PANDA-2nd-place-solution
|
AdaptiveConcatPool2d
| false
| 8,011
|
[
"MIT"
] | 17
|
44ab72780f9e3594811f2a7520456de7a9e677db
|
https://github.com/DrHB/PANDA-2nd-place-solution/tree/44ab72780f9e3594811f2a7520456de7a9e677db
|
StyledSiren
|
from torch.autograd import Function
import math
import torch
from torch import nn
import torch.nn.functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class ModulatedSiren2d(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, demodulate=True,
is_first=False, omega_0=30):
super().__init__()
self.eps = 1e-08
self.kernel_size = 1
self.in_channel = in_channel
self.out_channel = out_channel
fan_in = in_channel
self.scale = 1 / math.sqrt(fan_in)
if is_first:
self.demod_scale = 3 * fan_in
else:
self.demod_scale = omega_0 ** 2 / 2
self.omega0 = omega_0
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
1, 1))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style)
style = style.view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) * self.
demod_scale + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, 1, 1)
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = out * self.omega0
return out
class StyledSiren(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, demodulate=True,
is_first=False, omega_0=30.0):
super().__init__()
self.conv = ModulatedSiren2d(in_channel, out_channel, style_dim,
demodulate=demodulate, is_first=is_first, omega_0=omega_0)
self.noise = NoiseInjection()
def forward(self, input, style, noise=None):
out = self.conv(input, style)
out = self.noise(out, noise=noise)
out = torch.sin(out)
return out
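def _styled_siren_demo():
    """Editor's usage sketch (hypothetical helper, not part of the original
    code): StyledSiren modulates a 1x1 convolution with a style vector,
    injects noise, and applies a sine activation, so outputs lie in
    [-1, 1]."""
    layer = StyledSiren(in_channel=8, out_channel=8, style_dim=16)
    out = layer(torch.rand(2, 8, 4, 4), torch.rand(2, 16))
    assert out.shape == (2, 8, 4, 4)
    assert out.abs().max().item() <= 1.0
    return out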
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'style_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.autograd import Function
import math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_rsqrt_sum_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp7 = tmp6 * tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp13 = tmp12 * tmp1
tmp15 = tmp13 * tmp14
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp19 = tmp18 * tmp1
tmp21 = tmp19 * tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = 450.0
tmp25 = tmp23 * tmp24
tmp26 = 1e-08
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + x5, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mul_sin_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp8 = tl_math.sin(tmp7)
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0
del buf0
triton_poi_fused_add_mul_pow_rsqrt_sum_2[grid(16)](primals_5, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
triton_poi_fused_mul_3[grid(64)](primals_5, buf2, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf3
buf5 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4,
1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1))
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf7 = torch.ops.aten.normal_functional.default(buf6)
del buf6
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sin_4[grid(256)](buf5, primals_6, buf8,
buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf9, primals_4, primals_5, primals_6, buf2, reinterpret_tensor(buf4
, (16, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), buf5, buf8
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class ModulatedSiren2d(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, demodulate=True,
is_first=False, omega_0=30):
super().__init__()
self.eps = 1e-08
self.kernel_size = 1
self.in_channel = in_channel
self.out_channel = out_channel
fan_in = in_channel
self.scale = 1 / math.sqrt(fan_in)
if is_first:
self.demod_scale = 3 * fan_in
else:
self.demod_scale = omega_0 ** 2 / 2
self.omega0 = omega_0
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
1, 1))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style)
style = style.view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) * self.
demod_scale + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, 1, 1)
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = out * self.omega0
return out
class StyledSirenNew(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, demodulate=True,
is_first=False, omega_0=30.0):
super().__init__()
self.conv = ModulatedSiren2d(in_channel, out_channel, style_dim,
demodulate=demodulate, is_first=is_first, omega_0=omega_0)
self.noise = NoiseInjection()
def forward(self, input_0, input_1):
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_6 = self.noise.weight
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
Dolorousrtur/style-people
|
StyledSiren
| false
| 8,012
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
DotAttn
|
import torch
from torch import nn
from torch.nn import functional as F
class DotAttn(nn.Module):
def __init__(self, dropout=0, enc_trans=None, dec_trans=None):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.dec_trans = dec_trans or nn.Identity()
self.enc_trans = enc_trans or nn.Identity()
def forward(self, enc, enc_mask, dec):
enc_trans = self.dropout(self.enc_trans(enc))
dec_trans = self.dropout(self.dec_trans(dec))
Bdec, Tdec, Ddec = dec_trans.size()
Benc, Tenc, Denc = enc_trans.size()
dec_trans_exp = dec_trans.unsqueeze(1).expand(Bdec, Tenc, Tdec, Ddec)
enc_trans_exp = enc_trans.unsqueeze(2).expand(Benc, Tenc, Tdec, Denc)
mask_exp = enc_mask.unsqueeze(2).expand(Benc, Tenc, Tdec)
raw_scores_exp = dec_trans_exp.mul(enc_trans_exp).sum(3)
raw_scores_exp -= (1 - mask_exp) * 1e+20
scores_exp = F.softmax(raw_scores_exp, dim=1)
context_exp = scores_exp.unsqueeze(3).expand_as(enc_trans_exp).mul(
enc_trans_exp).sum(1)
return context_exp, scores_exp
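def _dot_attn_demo():
    """Editor's example (hypothetical helper, not part of the original code):
    scores are softmax-normalized over encoder time (dim=1), so with an
    all-ones mask each decoder step's attention weights over the encoder
    sum to 1."""
    attn = DotAttn()
    enc, dec = torch.rand(2, 5, 8), torch.rand(2, 3, 8)
    context, scores = attn(enc, torch.ones(2, 5), dec)
    assert context.shape == (2, 3, 8)
    assert torch.allclose(scores.sum(1), torch.ones(2, 3))
    return context, scores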
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_rsub_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = 1.0
tmp17 = tmp16 - tmp15
tmp18 = 1e+20
tmp19 = tmp17 * tmp18
tmp20 = tmp14 - tmp19
tl.store(out_ptr0 + x4, tmp20, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x3, tmp14, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_rsub_sub_sum_0[grid(64)](arg1_1, arg0_1,
arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_mul_sum_3[grid(64)](buf2, arg0_1, buf3, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del arg0_1
return buf3, buf2
class DotAttnNew(nn.Module):
def __init__(self, dropout=0, enc_trans=None, dec_trans=None):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.dec_trans = dec_trans or nn.Identity()
self.enc_trans = enc_trans or nn.Identity()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg2_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
DreamerDeo/gazp
|
DotAttn
| false
| 8,013
|
[
"MIT"
] | 18
|
5f823a447ffdf5176023a01516d2be7c383294d9
|
https://github.com/DreamerDeo/gazp/tree/5f823a447ffdf5176023a01516d2be7c383294d9
|
GeM
|
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
import torch.optim
def gem(x, p=3, eps=1e-06):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(
1.0 / p)
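def _gem_demo():
    """Editor's example (hypothetical helper, not part of the original code):
    GeM pooling interpolates between average pooling (p = 1) and max pooling
    (p -> inf)."""
    x = torch.rand(2, 3, 7, 7) + 0.1
    assert torch.allclose(gem(x, p=1), x.mean(dim=(-2, -1), keepdim=True),
        atol=1e-05)
    near_max = gem(x, p=100)
    assert (near_max <= x.amax(dim=(-2, -1), keepdim=True) + 0.0001).all()
    return near_max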
class GeM(nn.Module):
def __init__(self, p=3, eps=1e-06):
super(GeM, self).__init__()
self.p = Parameter(torch.ones(1) * p)
self.eps = eps
def forward(self, x):
return gem(x, p=self.p, eps=self.eps)
def __repr__(self):
return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.
p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp5 = libdevice.pow(tmp2, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr1 + 0)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tmp35 = tl.full([1], 1, tl.int32)
tmp36 = tmp35 / tmp34
tmp37 = 1.0
tmp38 = tmp36 * tmp37
tmp39 = libdevice.pow(tmp32, tmp38)
tl.store(out_ptr0 + x0, tmp32, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_pow_0[grid(256)](primals_2, primals_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1[grid(16)](buf0,
primals_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1, buf2
def gem(x, p=3, eps=1e-06):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(
1.0 / p)
class GeMNew(nn.Module):
def __init__(self, p=3, eps=1e-06):
super(GeMNew, self).__init__()
self.p = Parameter(torch.ones(1) * p)
self.eps = eps
def __repr__(self):
return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.
p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')'
def forward(self, input_0):
primals_1 = self.p
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
DrHB/PANDA-2nd-place-solution
|
GeM
| false
| 8,014
|
[
"MIT"
] | 17
|
44ab72780f9e3594811f2a7520456de7a9e677db
|
https://github.com/DrHB/PANDA-2nd-place-solution/tree/44ab72780f9e3594811f2a7520456de7a9e677db
|
ScaledLeakyReLU
|
import math
import torch
from torch import nn
import torch.nn.functional as F
class ScaledLeakyReLU(nn.Module):
def __init__(self, negative_slope=0.2):
super().__init__()
self.negative_slope = negative_slope
def forward(self, input):
out = F.leaky_relu(input, negative_slope=self.negative_slope)
return out * math.sqrt(2)
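def _scaled_leaky_relu_gain_demo():
    """Editor's note (hypothetical helper, not part of the original code):
    the sqrt(2) gain compensates for the activation passing only half of a
    zero-mean input's energy at full scale; for slope 0.2 the second moment
    of the output is 2 * (0.5 + 0.5 * 0.2 ** 2) = 1.04 for standard normal
    input."""
    act = ScaledLeakyReLU(0.2)
    y = act(torch.randn(1000000))
    assert abs((y * y).mean().item() - 1.04) < 0.02
    return y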
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 1.4142135623730951
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ScaledLeakyReLUNew(nn.Module):
def __init__(self, negative_slope=0.2):
super().__init__()
self.negative_slope = negative_slope
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Dolorousrtur/style-people
|
ScaledLeakyReLU
| false
| 8,015
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
SinkhornKnopp
|
import torch
import torch.distributed as dist
class SinkhornKnopp(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
        A simple iterative method to approach a doubly stochastic matrix is to alternately rescale
rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
@torch.no_grad()
def forward(self, Q: 'torch.Tensor') ->torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
        Applies the entropy regularization, normalizes the Q matrix, and then normalizes rows and
        columns in an alternating fashion for num_iters iterations. Before returning, it normalizes
        the columns once more so that the output is an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0]
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for it in range(self.num_iters):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
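# Editor's note: a hypothetical usage sketch (not from the original repo).
# Because the final step normalizes the columns of the transposed matrix,
# each row of the returned tensor is a distribution over prototypes.
def _sinkhorn_demo():
    import torch
    torch.manual_seed(0)
    sk = SinkhornKnopp(num_iters=3, epsilon=0.05)
    logits = torch.rand(4, 4)  # cosine similarities: samples x prototypes
    q = sk(logits)
    assert torch.allclose(q.sum(dim=1), torch.ones(4), atol=1e-5)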
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 20.0  # 1 / epsilon for the default epsilon = 0.05
    tmp2 = tmp0 * tmp1
    tmp3 = tl_math.exp(tmp2)  # exp(Q / epsilon)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + 4 * y0), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_1[grid(4)](arg0_1, buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(4)](arg0_1, buf0, buf1, buf2, 4, XBLOCK
=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_3[grid(4)](arg0_1, buf0, buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_4[grid(16)](arg0_1, buf0, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_div_6[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused_mul_7[grid(4, 4)](buf6, buf7, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused_8[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4, YBLOCK=4,
num_warps=1, num_stages=1)
del buf7
return buf8,
class SinkhornKnoppNew(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
        A simple iterative method to approach a doubly stochastic matrix is to alternately rescale
rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
DonkeyShot21/cassle
|
SinkhornKnopp
| false
| 8,016
|
[
"MIT"
] | 13
|
d25f9c7cb5e822660dc1ef03e7fac09a33d0b1a8
|
https://github.com/DonkeyShot21/cassle/tree/d25f9c7cb5e822660dc1ef03e7fac09a33d0b1a8
|
HighwayLayer
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.utils.checkpoint
def my_xavier_init(m, gain=1):
"""Xavier initialization: weights initialization that tries to make variance of outputs
of a layer equal to variance of its inputs.
"""
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class HighwayLayer(torch.nn.Module):
"""Highway transformation used in span prediction."""
def __init__(self, dim):
super(HighwayLayer, self).__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x))
nlin = torch.tanh(self.nlin_proj(x))
res = gate * nlin + (1 - gate) * x
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
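# Editor's note: a hypothetical verification sketch (not from the original
# repo). It recomputes gate * tanh(W_n x) + (1 - gate) * x by hand; the gate
# bias of -1 biases the layer toward the carry (identity) path at init.
def _highway_demo():
    import torch
    torch.manual_seed(0)
    layer = HighwayLayer(8)
    x = torch.randn(2, 8)
    gate = torch.sigmoid(layer.gate_proj(x))
    manual = gate * torch.tanh(layer.nlin_proj(x)) + (1 - gate) * x
    assert torch.allclose(layer(x), manual)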
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, buf1,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
def my_xavier_init(m, gain=1):
"""Xavier initialization: weights initialization that tries to make variance of outputs
of a layer equal to variance of its inputs.
"""
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class HighwayLayerNew(torch.nn.Module):
"""Highway transformation used in span prediction."""
def __init__(self, dim):
super(HighwayLayerNew, self).__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, input_0):
primals_1 = self.gate_proj.weight
primals_2 = self.gate_proj.bias
primals_4 = self.nlin_proj.weight
primals_5 = self.nlin_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
CowherdChris/droidlet
|
HighwayLayer
| false
| 8,017
|
[
"MIT"
] | 26
|
8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
https://github.com/CowherdChris/droidlet/tree/8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
PostGCN
|
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConvolution(nn.Module):
"""
    adapted from: https://github.com/tkipf/gcn/blob/92600c39797c2bfb61a508e52b88fb554df30177/gcn/layers.py#L132
"""
def __init__(self, in_features, out_features, bias=True, node_n=48):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.att = Parameter(torch.FloatTensor(node_n, node_n))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
self.att.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
support = torch.matmul(input, self.weight)
output = torch.matmul(self.att, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class PostGCN(nn.Module):
def __init__(self, input_feature, hidden_feature, node_n):
super(PostGCN, self).__init__()
self.input_feature = input_feature
self.hidden_feature = hidden_feature
self.node_n = node_n
self.gcn = GraphConvolution(input_feature, hidden_feature, node_n=
node_n)
def forward(self, x):
y = self.gcn(x)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_feature': 4, 'hidden_feature': 4, 'node_n': 4}]
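# Editor's note: a hypothetical verification sketch (not from the original
# repo). A GraphConvolution computes att @ (x @ weight) + bias, which is
# easy to check by hand at these shapes.
def _postgcn_demo():
    import torch
    torch.manual_seed(0)
    model = PostGCN(input_feature=4, hidden_feature=4, node_n=4)
    x = torch.rand(4, 4)
    gcn = model.gcn
    manual = gcn.att @ (x @ gcn.weight) + gcn.bias
    assert torch.allclose(model(x), manual)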
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf0, buf1, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf2 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clone_1[grid(64, 4)](buf2, primals_4, buf3, 64,
4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del buf2
del primals_4
return buf3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), primals_3, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
class GraphConvolution(nn.Module):
"""
    adapted from: https://github.com/tkipf/gcn/blob/92600c39797c2bfb61a508e52b88fb554df30177/gcn/layers.py#L132
"""
def __init__(self, in_features, out_features, bias=True, node_n=48):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.att = Parameter(torch.FloatTensor(node_n, node_n))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
self.att.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
support = torch.matmul(input, self.weight)
output = torch.matmul(self.att, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class PostGCNNew(nn.Module):
def __init__(self, input_feature, hidden_feature, node_n):
super(PostGCNNew, self).__init__()
self.input_feature = input_feature
self.hidden_feature = hidden_feature
self.node_n = node_n
self.gcn = GraphConvolution(input_feature, hidden_feature, node_n=
node_n)
def forward(self, input_0):
primals_1 = self.gcn.weight
primals_3 = self.gcn.att
primals_4 = self.gcn.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Droliven/MSRGCN
|
PostGCN
| false
| 8,018
|
[
"MIT"
] | 28
|
5d8d8e3365d3b23ca2ac734ace7e84135a6e3a9e
|
https://github.com/Droliven/MSRGCN/tree/5d8d8e3365d3b23ca2ac734ace7e84135a6e3a9e
|
DiscountBlackLoss
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.random
import torch.nn.functional as F
class DiscountBlackLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, source, target):
"""
        Heavily discounts (mask weight 1e-06) pixels whose target channels are all zero.
        Probably not suitable for greyscale images.
        :param source: image from the network
        :param target: ground truth
        :return: the mean loss, the per-pixel loss map, and the mask
"""
loss = F.mse_loss(source, target, reduction='none')
mask = torch.sum(target, dim=1, keepdim=True)
mask[mask > 0.0] = 1.0
mask[mask == 0.0] = 1e-06
loss = loss * mask
return torch.mean(loss), loss, mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
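# Editor's note: a hypothetical usage sketch (not from the original repo).
# Target pixels whose channels are all zero get mask weight 1e-06, everything
# else gets weight 1.0, so black ground-truth pixels barely contribute.
def _discount_black_demo():
    import torch
    torch.manual_seed(0)
    loss_fn = DiscountBlackLoss()
    target = torch.rand(1, 3, 2, 2)
    target[0, :, 0, 0] = 0.0  # one all-black pixel
    source = torch.rand(1, 3, 2, 2)
    mean_loss, loss, mask = loss_fn(source, target)
    assert mask[0, 0, 0, 0] == 1e-06 and mask[0, 0, 0, 1] == 1.0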
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.random
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_index_put_lift_fresh_sum_0(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
    tmp7 = 0.0
    tmp8 = tmp6 > tmp7
    tmp9 = 1.0
    tmp10 = tl.where(tmp8, tmp9, tmp6)  # mask[mask > 0.0] = 1.0
    tmp11 = tmp10 == tmp7
    tmp12 = 9.999999974752427e-07  # 1e-06 rounded to float32
    tmp13 = tl.where(tmp11, tmp12, tmp10)  # mask[mask == 0.0] = 1e-06
tl.store(in_out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused_mean_mse_loss_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr1 + r3, None)
tmp4 = tl.load(in_ptr2 + (r0 + 16 * r2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + tl.broadcast_to(r3, [RBLOCK]), tmp5, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_sum_0[grid(64)](buf1, arg0_1,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_mean_mse_loss_mul_1[grid(1)](buf4, arg1_1, arg0_1,
buf1, buf2, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4, buf2, buf1
class DiscountBlackLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1], output[2]
|
DuaneNielsen/keypoints
|
DiscountBlackLoss
| false
| 8,019
|
[
"MIT"
] | 42
|
302fa02966d4372ac9b5aaa3d8dc24684be0b252
|
https://github.com/DuaneNielsen/keypoints/tree/302fa02966d4372ac9b5aaa3d8dc24684be0b252
|
EqualConv2dSame
|
import math
import torch
from torch import nn
import torch.nn.functional as F
class EqualConv2dSame(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_channel, in_channel,
kernel_size, kernel_size))
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
def forward(self, input):
pad = self.padding
input = F.pad(input, (pad, pad, pad, pad), mode='replicate')
out = F.conv2d(input, self.weight * self.scale, bias=self.bias,
stride=self.stride)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
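# Editor's note: a hypothetical verification sketch (not from the original
# repo). The layer is replicate padding followed by a conv whose stored
# weight is rescaled by 1/sqrt(fan_in) at run time (the equalized-lr trick).
def _equal_conv_demo():
    import torch
    import torch.nn.functional as F
    torch.manual_seed(0)
    conv = EqualConv2dSame(4, 8, kernel_size=3, padding=1)
    x = torch.rand(2, 4, 6, 6)
    padded = F.pad(x, (1, 1, 1, 1), mode='replicate')
    manual = F.conv2d(padded, conv.weight * conv.scale, bias=conv.bias)
    assert torch.allclose(conv(x), manual)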
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= x1) + x1 * (x1 < 3)) + 16 * x2 +
(3 * (3 <= x0) + x0 * (x0 < 3))), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad2d_0[grid(256)](primals_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_1[grid(256)](primals_2, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf3, buf0, buf1
class EqualConv2dSameNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_channel, in_channel,
kernel_size, kernel_size))
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Dolorousrtur/style-people
|
EqualConv2dSame
| false
| 8,020
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
EqualLinear
|
from torch.autograd import Function
import math
import torch
from torch import nn
import torch.nn.functional as F
# NOTE: `fused` used inside the Function classes below refers to the compiled
# fused_bias_act CUDA extension loaded elsewhere in the original repo; it is
# not defined in this standalone snippet.
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
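# Editor's note: a hypothetical verification sketch (not from the original
# repo). With activation=None, EqualLinear is a plain linear layer whose
# stored weight is divided by lr_mul at init and rescaled at run time, which
# equalizes the effective learning rate across layers.
def _equal_linear_demo():
    import torch
    import torch.nn.functional as F
    torch.manual_seed(0)
    layer = EqualLinear(4, 4, lr_mul=0.5)
    x = torch.randn(3, 4)
    manual = F.linear(x, layer.weight * layer.scale, layer.bias * layer.lr_mul)
    assert torch.allclose(layer(x), manual)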
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinearNew(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Dolorousrtur/style-people
|
EqualLinear
| false
| 8,021
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
ModulatedSiren2d
|
from torch.autograd import Function
import math
import torch
from torch import nn
import torch.nn.functional as F
# NOTE: `fused` used inside the Function classes below refers to the compiled
# fused_bias_act CUDA extension loaded elsewhere in the original repo; it is
# not defined in this standalone snippet.
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class ModulatedSiren2d(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, demodulate=True,
is_first=False, omega_0=30):
super().__init__()
self.eps = 1e-08
self.kernel_size = 1
self.in_channel = in_channel
self.out_channel = out_channel
fan_in = in_channel
self.scale = 1 / math.sqrt(fan_in)
if is_first:
self.demod_scale = 3 * fan_in
else:
self.demod_scale = omega_0 ** 2 / 2
self.omega0 = omega_0
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
1, 1))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style)
style = style.view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) * self.
demod_scale + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, 1, 1)
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = out * self.omega0
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'style_dim': 4}]
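# Editor's note: a hypothetical verification sketch (not from the original
# repo). The batched grouped conv in forward() is just a per-sample 1x1
# matmul with a style-modulated, demodulated weight, which einsum makes
# explicit.
def _modulated_siren_demo():
    import torch
    torch.manual_seed(0)
    m = ModulatedSiren2d(in_channel=4, out_channel=4, style_dim=4)
    x, s = torch.rand(2, 4, 5, 5), torch.rand(2, 4)
    style = m.modulation(s).view(2, 1, 4, 1, 1)
    w = m.scale * m.weight * style
    demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) * m.demod_scale + 1e-08)
    w = (w * demod.view(2, 4, 1, 1, 1)).view(2, 4, 4)
    ref = torch.einsum('boi,bihw->bohw', w, x) * m.omega0
    assert torch.allclose(m(x, s), ref, atol=1e-4)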
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_rsqrt_sum_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp7 = tmp6 * tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp13 = tmp12 * tmp1
tmp15 = tmp13 * tmp14
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp19 = tmp18 * tmp1
tmp21 = tmp19 * tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
    tmp24 = 450.0  # demod_scale = omega_0 ** 2 / 2 for omega_0 = 30
tmp25 = tmp23 * tmp24
tmp26 = 1e-08
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + x5, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 30.0  # omega_0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0
del buf0
triton_poi_fused_add_mul_pow_rsqrt_sum_2[grid(16)](primals_5, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
triton_poi_fused_mul_3[grid(64)](primals_5, buf2, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf3
buf5 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4,
1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1))
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_mul_4[grid(256)](buf6, 256, XBLOCK=256, num_warps=
4, num_stages=1)
return buf6, primals_4, primals_5, buf2, reinterpret_tensor(buf4, (16,
4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4,
4), (256, 16, 4, 1), 0)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class ModulatedSiren2dNew(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, demodulate=True,
is_first=False, omega_0=30):
super().__init__()
self.eps = 1e-08
self.kernel_size = 1
self.in_channel = in_channel
self.out_channel = out_channel
fan_in = in_channel
self.scale = 1 / math.sqrt(fan_in)
if is_first:
self.demod_scale = 3 * fan_in
else:
self.demod_scale = omega_0 ** 2 / 2
self.omega0 = omega_0
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
1, 1))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel})'
)
def forward(self, input_0, input_1):
primals_5 = self.weight
primals_2 = self.modulation.weight
primals_3 = self.modulation.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Dolorousrtur/style-people
|
ModulatedSiren2d
| false
| 8,022
|
[
"MIT"
] | 15
|
c48b12b245cc50f8230c0654dffe40016f2a69f1
|
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
MLP
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class MLP(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(MLP, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=True)
self.Linear2 = Linear(nhid, nclass, dropout, bias=True)
def forward(self, x):
x = torch.relu(self.Linear1(x))
return torch.log_softmax(self.Linear2(x), dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
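# Editor's note: a hypothetical usage sketch (not from the original repo).
# In eval mode the input dropout is a no-op, and exponentiating the
# log-softmax output recovers class probabilities that sum to 1 along dim=1.
def _mlp_demo():
    import torch
    torch.manual_seed(0)
    model = MLP(nfeat=4, nhid=8, nclass=3, dropout=0.5).eval()
    x = torch.rand(5, 4)
    probs = model(x).exp()
    assert torch.allclose(probs.sum(dim=1), torch.ones(5), atol=1e-6)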
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp6 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 + tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp4 - tmp11
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp7 - tmp11
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp10 - tmp11
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tl.store(out_ptr0 + x4, tmp11, xmask)
tl.store(out_ptr1 + x4, tmp23, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 64
x5 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr2 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x4, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__log_softmax_add_1[grid(64)](buf2, primals_5, buf3,
buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_add_2[grid(256)](buf5, primals_5,
buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del buf4
del primals_5
return buf5, buf5, reinterpret_tensor(buf1, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), buf6, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class MLPNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(MLPNew, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=True)
self.Linear2 = Linear(nhid, nclass, dropout, bias=True)
def forward(self, input_0):
primals_2 = self.Linear1.weight
primals_3 = self.Linear1.bias
primals_4 = self.Linear2.weight
primals_5 = self.Linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
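The compiled graph above replaces torch.log_softmax with two kernel passes: the first computes the per-position max and log-sum-exp of the bias-added logits, the second subtracts both. A reference sketch (an assumption, not part of the record) of that numerically stable formulation:

import torch

logits = torch.randn(64, 4)
m = logits.max(dim=1, keepdim=True).values
lse = (logits - m).exp().sum(dim=1, keepdim=True).log()
out = logits - m - lse            # == log_softmax(logits, dim=1)
assert torch.allclose(out, torch.log_softmax(logits, dim=1), atol=1e-6)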
|
DongHande/PT_propagation_then_training
|
MLP
| false
| 8,023
|
[
"MIT"
] | 21
|
3f346ff161d2a0b807e3c0269ad26a7266305cc3
|
https://github.com/DongHande/PT_propagation_then_training/tree/3f346ff161d2a0b807e3c0269ad26a7266305cc3
|
SEModule
|
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class FastAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class SEModule(nn.Module):
def __init__(self, channels, reduction_channels, inplace=True):
super(SEModule, self).__init__()
self.avg_pool = FastAvgPool2d()
self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1,
padding=0, bias=True)
self.relu = nn.ReLU(inplace=inplace)
self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1,
padding=0, bias=True)
self.activation = nn.Sigmoid()
def forward(self, x):
x_se = self.avg_pool(x)
x_se2 = self.fc1(x_se)
x_se2 = self.relu(x_se2)
x_se = self.fc2(x_se2)
x_se = self.activation(x_se)
return x * x_se
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'reduction_channels': 4}]
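A brief shape walkthrough (a sketch, not from the record) for the (4, 4, 4, 4) test input: FastAvgPool2d squeezes each channel to a scalar, the two 1x1 convolutions form the excitation bottleneck, and the sigmoid gate broadcasts back over the spatial grid in x * x_se:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
x_se = x.view(4, 4, -1).mean(-1).view(4, 4, 1, 1)   # FastAvgPool2d: (N, C, 1, 1)
assert torch.allclose(x_se, F.adaptive_avg_pool2d(x, 1), atol=1e-6)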
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
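    # Global average pool: mean over the 16 spatial positions of each
    # (batch, channel) pair.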
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
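    # Adds the fc1 bias to the 1x1 conv output and applies ReLU, in place.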
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
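    # Adds the fc2 bias to the 1x1 conv output, in place.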
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
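    # SE gating: out = x * sigmoid(gate), broadcasting the per-(batch, channel)
    # gate over the spatial grid.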
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 4, 1,
1), (4, 1, 0, 0), 0), primals_2, stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(4, 4, 1, 1), (4, 1, 1, 1), 0), buf3, buf5
class FastAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class SEModuleNew(nn.Module):
def __init__(self, channels, reduction_channels, inplace=True):
super(SEModuleNew, self).__init__()
self.avg_pool = FastAvgPool2d()
self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1,
padding=0, bias=True)
self.relu = nn.ReLU(inplace=inplace)
self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1,
padding=0, bias=True)
self.activation = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
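Note that the nn.Sigmoid module disappears in the compiled version: the last kernel fuses it into the gating multiply, with x1 = xindex // 16 selecting the (batch, channel) gate for each of the 256 outputs. A reference sketch (not part of the record) of what triton_poi_fused_mul_sigmoid_3 computes:

import torch

x = torch.rand(4, 4, 4, 4)
gate = torch.randn(4, 4, 1, 1)    # buf5: the fc2 output, pre-sigmoid
out = x * torch.sigmoid(gate)     # broadcast multiply over the 4x4 grid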
|
Alibaba-MIIL/ZS_SDL
|
SEModule
| false
| 8,024
|
[
"MIT"
] | 20
|
769fe4f57d2d458a7c4b5468a6395c9b296b1dad
|
https://github.com/Alibaba-MIIL/ZS_SDL/tree/769fe4f57d2d458a7c4b5468a6395c9b296b1dad
|
Encoder
|
import torch
from torch import nn
import torch.nn.functional as F
from functools import partial
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = nn.Linear(in_features=d_model, out_features=d_inner_hid)
self.fc2 = nn.Linear(in_features=d_inner_hid, out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate, training=False)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = nn.Linear(in_features=d_model, out_features=d_value *
n_head, bias=False)
self.proj_fc = nn.Linear(in_features=d_value * n_head, out_features
=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=(q.shape[0], -1, self.n_head, self.d_key))
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=(k.shape[0], -1, self.n_head, self.
d_key))
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=(v.shape[0], -1, self.n_head, self.
d_value))
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat((cache_k, k), dim=2)
v = torch.cat((cache_v, v), dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(-2, -1))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate, training=False)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=(out.shape[0], -1, out.shape[2] *
out.shape[3]))
out = self.proj_fc(out)
return out
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = []
self.g = partial(self.f)
self.g1 = partial(self.f1, dropout_rate)
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.append(self.g)
elif cmd == 'n':
num = sum(1 for line in self.children())
layernorm = nn.LayerNorm(normalized_shape=d_model)
self.add_module('layer_norm_%d' % num, layernorm)
self.functors.append(layernorm)
elif cmd == 'd':
self.functors.append(self.g1)
@staticmethod
def f(x, y):
if y is not None:
return x + y
else:
return x
@staticmethod
def f1(p, x):
if p:
return F.dropout(x, p, training=False)
else:
return x
def forward(self, x, residual=None):
for i, cmd in enumerate(self.process_cmd):
if cmd == 'a':
x = self.functors[i](x, residual)
else:
x = self.functors[i](x)
return x
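# Sketch (added note, not part of the original source): with the defaults used
# below, preprocess_cmd='n' applies LayerNorm before each sublayer and
# postprocess_cmd='da' applies dropout followed by the residual add after it,
# so each sublayer computes x = x + dropout(sublayer(LayerNorm(x))), e.g.:
#
#     ln = nn.LayerNorm(4)
#     x = torch.randn(4, 4, 4)
#     out = sublayer(ln(x))                          # 'n'  (preprocess)
#     x = F.dropout(out, 0.5, training=False) + x    # 'da' (postprocess)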
class EncoderLayer(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayer, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
attn_output = self.self_attn(self.preprocesser1(enc_input), None,
None, attn_bias)
attn_output = self.postprocesser1(attn_output, enc_input)
ffn_output = self.ffn(self.preprocesser2(attn_output))
ffn_output = self.postprocesser2(ffn_output, attn_output)
return ffn_output
class Encoder(nn.Module):
"""
encoder
"""
def __init__(self, n_layer, n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n', postprocess_cmd='da'):
super(Encoder, self).__init__()
self.encoder_layers = list()
for i in range(n_layer):
encoder = EncoderLayer(n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd, postprocess_cmd)
self.add_module('layer_%d' % i, encoder)
self.encoder_layers.append(encoder)
self.processer = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
for encoder_layer in self.encoder_layers:
enc_output = encoder_layer(enc_input, attn_bias)
enc_input = enc_output
enc_output = self.processer(enc_output)
return enc_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_layer': 1, 'n_head': 4, 'd_key': 4, 'd_value': 4,
'd_model': 4, 'd_inner_hid': 4, 'prepostprocess_dropout': 0.5,
'attention_dropout': 0.5, 'relu_dropout': 0.5}]
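One detail worth flagging before the compiled version: the attention scores are scaled by d_model ** -0.5 rather than the more common d_key ** -0.5. With the test configuration (d_model=4) the scale is 0.5, which is why 0.5 appears as a hard-coded constant in the fused softmax kernels below. A sketch (not from the record) of the per-head computation:

import torch

q = torch.randn(4, 4, 4, 4)       # (batch, head, seq, d_key)
k = torch.randn(4, 4, 4, 4)
v = torch.randn(4, 4, 4, 4)
bias = torch.randn(4, 4, 4)       # attn_bias from get_inputs()
scores = q @ k.transpose(-2, -1) * 4 ** -0.5 + bias  # bias broadcasts over batch
out = torch.softmax(scores, dim=-1) @ v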
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.nn.functional as F
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
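    # LayerNorm pass 1: per-row mean and rsqrt(variance + 1e-05) over the
    # last dim (size 4).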
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
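    # LayerNorm pass 2: (x - mean) * rstd * weight + bias.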
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
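    # Contiguous copy realizing the (0, 2, 1, 3) permute that splits the
    # projections into per-head layout.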
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
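    # Tiled copy producing K transposed to (batch, head, d_key, seq) for the
    # QK^T batched matmul.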
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
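    # Softmax pass 1 over 0.5 * scores + attn_bias: per-row max and sum of
    # exponentials.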
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
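    # Softmax pass 2: exp(0.5 * score + bias - max) / row_sum, in place.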
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
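    # Residual add + LayerNorm pass 1: mean and biased variance of the
    # (x + residual) rows.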
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
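    # Residual add + LayerNorm pass 2:
    # ((x + residual) - mean) * rsqrt(var + 1e-05) * weight + bias.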
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
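    # FFN activation: fc1 bias-add + ReLU, plus the (out <= 0) mask for the
    # backward pass.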
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
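    # Final residual merge: (fc2_out + fc2_bias) + (attn_out + enc_input).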
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf3, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_4[grid(64)](buf8, primals_7, buf9,
buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_add_mul_5[grid(256)](buf11, primals_7,
buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf5, buf12, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), out=buf13
)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf13, buf14, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf13
buf15 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf14, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf15)
buf16 = buf1
del buf1
buf17 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(16)](buf15, primals_3,
buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_add_native_layer_norm_7[grid(64)](buf15, primals_3,
buf16, buf17, primals_9, primals_10, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_8[grid(64)](buf20,
primals_12, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_add_9[grid(64)](buf22, primals_14, buf15,
primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
buf23 = buf17
del buf17
buf24 = buf16
del buf16
triton_poi_fused_native_layer_norm_0[grid(16)](buf22, buf23, buf24,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf22, buf23, buf24,
primals_15, primals_16, buf25, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf23
del buf24
del primals_16
return buf25, primals_3, primals_9, primals_15, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 16), (16,
1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), reinterpret_tensor(buf20, (16, 4), (4, 1), 0
), buf22, primals_13, buf26, primals_11, primals_8, reinterpret_tensor(
buf12, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0
), primals_6, primals_5, primals_4
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = nn.Linear(in_features=d_model, out_features=d_inner_hid)
self.fc2 = nn.Linear(in_features=d_inner_hid, out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate, training=False)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = nn.Linear(in_features=d_model, out_features=d_value *
n_head, bias=False)
self.proj_fc = nn.Linear(in_features=d_value * n_head, out_features
=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=(q.shape[0], -1, self.n_head, self.d_key))
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=(k.shape[0], -1, self.n_head, self.
d_key))
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=(v.shape[0], -1, self.n_head, self.
d_value))
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat((cache_k, k), dim=2)
v = torch.cat((cache_v, v), dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(-2, -1))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate, training=False)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=(out.shape[0], -1, out.shape[2] *
out.shape[3]))
out = self.proj_fc(out)
return out
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = []
self.g = partial(self.f)
self.g1 = partial(self.f1, dropout_rate)
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.append(self.g)
elif cmd == 'n':
num = sum(1 for line in self.children())
layernorm = nn.LayerNorm(normalized_shape=d_model)
self.add_module('layer_norm_%d' % num, layernorm)
self.functors.append(layernorm)
elif cmd == 'd':
self.functors.append(self.g1)
@staticmethod
def f(x, y):
if y is not None:
return x + y
else:
return x
@staticmethod
def f1(p, x):
if p:
return F.dropout(x, p, training=False)
else:
return x
def forward(self, x, residual=None):
for i, cmd in enumerate(self.process_cmd):
if cmd == 'a':
x = self.functors[i](x, residual)
else:
x = self.functors[i](x)
return x
class EncoderLayer(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayer, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
attn_output = self.self_attn(self.preprocesser1(enc_input), None,
None, attn_bias)
attn_output = self.postprocesser1(attn_output, enc_input)
ffn_output = self.ffn(self.preprocesser2(attn_output))
ffn_output = self.postprocesser2(ffn_output, attn_output)
return ffn_output
class EncoderNew(nn.Module):
"""
encoder
"""
def __init__(self, n_layer, n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderNew, self).__init__()
self.encoder_layers = list()
for i in range(n_layer):
encoder = EncoderLayer(n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd, postprocess_cmd)
self.add_module('layer_%d' % i, encoder)
self.encoder_layers.append(encoder)
self.processer = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, input_0, input_1):
primals_1 = self.layer_0.preprocesser1.layer_norm_0.weight
primals_2 = self.layer_0.preprocesser1.layer_norm_0.bias
primals_4 = self.layer_0.self_attn.q_fc.weight
primals_5 = self.layer_0.self_attn.k_fc.weight
primals_6 = self.layer_0.self_attn.v_fc.weight
primals_8 = self.layer_0.self_attn.proj_fc.weight
primals_9 = self.layer_0.preprocesser2.layer_norm_0.weight
primals_10 = self.layer_0.preprocesser2.layer_norm_0.bias
primals_11 = self.layer_0.ffn.fc1.weight
primals_12 = self.layer_0.ffn.fc1.bias
primals_13 = self.layer_0.ffn.fc2.weight
primals_14 = self.layer_0.ffn.fc2.bias
primals_15 = self.processer.layer_norm_0.weight
primals_16 = self.processer.layer_norm_0.bias
primals_3 = input_0
primals_7 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0]
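The paired layer-norm kernels split nn.LayerNorm into a statistics pass (mean and rsqrt of variance) and an apply pass. A reference sketch (an assumption, not part of the record) showing the pair matches F.layer_norm:

import torch
import torch.nn.functional as F

x = torch.randn(16, 4)
mean = x.mean(dim=1, keepdim=True)
rstd = torch.rsqrt(x.var(dim=1, unbiased=False, keepdim=True) + 1e-05)
w, b = torch.ones(4), torch.zeros(4)
out = (x - mean) * rstd * w + b
assert torch.allclose(out, F.layer_norm(x, (4,), w, b), atol=1e-6)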
|
DocYard-ai/UCR
|
Encoder
| false
| 8,025
|
[
"Apache-2.0"
] | 10
|
7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
https://github.com/DocYard-ai/UCR/tree/7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
EncoderLayer
|
import torch
from torch import nn
import torch.nn.functional as F
from functools import partial
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = nn.Linear(in_features=d_model, out_features=d_inner_hid)
self.fc2 = nn.Linear(in_features=d_inner_hid, out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate, training=False)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = nn.Linear(in_features=d_model, out_features=d_value *
n_head, bias=False)
self.proj_fc = nn.Linear(in_features=d_value * n_head, out_features
=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=(q.shape[0], -1, self.n_head, self.d_key))
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=(k.shape[0], -1, self.n_head, self.
d_key))
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=(v.shape[0], -1, self.n_head, self.
d_value))
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat((cache_k, k), dim=2)
v = torch.cat((cache_v, v), dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(-2, -1))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate, training=False)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=(out.shape[0], -1, out.shape[2] *
out.shape[3]))
out = self.proj_fc(out)
return out
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = []
self.g = partial(self.f)
self.g1 = partial(self.f1, dropout_rate)
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.append(self.g)
elif cmd == 'n':
num = sum(1 for line in self.children())
layernorm = nn.LayerNorm(normalized_shape=d_model)
self.add_module('layer_norm_%d' % num, layernorm)
self.functors.append(layernorm)
elif cmd == 'd':
self.functors.append(self.g1)
@staticmethod
def f(x, y):
if y is not None:
return x + y
else:
return x
@staticmethod
def f1(p, x):
if p:
return F.dropout(x, p, training=False)
else:
return x
def forward(self, x, residual=None):
for i, cmd in enumerate(self.process_cmd):
if cmd == 'a':
x = self.functors[i](x, residual)
else:
x = self.functors[i](x)
return x
class EncoderLayer(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayer, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
attn_output = self.self_attn(self.preprocesser1(enc_input), None,
None, attn_bias)
attn_output = self.postprocesser1(attn_output, enc_input)
ffn_output = self.ffn(self.preprocesser2(attn_output))
ffn_output = self.postprocesser2(ffn_output, attn_output)
return ffn_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_key': 4, 'd_value': 4, 'd_model': 4,
'd_inner_hid': 4, 'prepostprocess_dropout': 0.5,
'attention_dropout': 0.5, 'relu_dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.nn.functional as F
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf3, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_4[grid(64)](buf8, primals_7, buf9,
buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_add_mul_5[grid(256)](buf11, primals_7,
buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf5, buf12, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), out=buf13
)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf13, buf14, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf13
buf15 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf14, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf15)
buf16 = buf1
del buf1
buf17 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(16)](buf15, primals_3,
buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_add_native_layer_norm_7[grid(64)](buf15, primals_3,
buf16, buf17, primals_9, primals_10, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf16
del buf17
del primals_10
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_8[grid(64)](buf20,
primals_12, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_add_9[grid(64)](buf22, primals_14, buf15,
primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
return buf22, primals_3, primals_9, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 16), (16, 1), 0
), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), reinterpret_tensor(buf20, (16, 4), (4, 1), 0
), primals_13, buf23, primals_11, primals_8, reinterpret_tensor(buf12,
(16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (
16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0
), primals_6, primals_5, primals_4
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = nn.Linear(in_features=d_model, out_features=d_inner_hid)
self.fc2 = nn.Linear(in_features=d_inner_hid, out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate, training=False)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = nn.Linear(in_features=d_model, out_features=d_value *
n_head, bias=False)
self.proj_fc = nn.Linear(in_features=d_value * n_head, out_features
=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=(q.shape[0], -1, self.n_head, self.d_key))
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=(k.shape[0], -1, self.n_head, self.
d_key))
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=(v.shape[0], -1, self.n_head, self.
d_value))
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat((cache_k, k), dim=2)
v = torch.cat((cache_v, v), dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(-2, -1))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate, training=False)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=(out.shape[0], -1, out.shape[2] *
out.shape[3]))
out = self.proj_fc(out)
return out
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = []
self.g = partial(self.f)
self.g1 = partial(self.f1, dropout_rate)
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.append(self.g)
elif cmd == 'n':
num = sum(1 for line in self.children())
layernorm = nn.LayerNorm(normalized_shape=d_model)
self.add_module('layer_norm_%d' % num, layernorm)
self.functors.append(layernorm)
elif cmd == 'd':
self.functors.append(self.g1)
@staticmethod
def f(x, y):
if y is not None:
return x + y
else:
return x
@staticmethod
def f1(p, x):
if p:
return F.dropout(x, p, training=False)
else:
return x
def forward(self, x, residual=None):
for i, cmd in enumerate(self.process_cmd):
if cmd == 'a':
x = self.functors[i](x, residual)
else:
x = self.functors[i](x)
return x
class EncoderLayerNew(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayerNew, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, input_0, input_1):
primals_1 = self.preprocesser1.layer_norm_0.weight
primals_2 = self.preprocesser1.layer_norm_0.bias
primals_4 = self.self_attn.q_fc.weight
primals_5 = self.self_attn.k_fc.weight
primals_6 = self.self_attn.v_fc.weight
primals_8 = self.self_attn.proj_fc.weight
primals_9 = self.preprocesser2.layer_norm_0.weight
primals_10 = self.preprocesser2.layer_norm_0.bias
primals_11 = self.ffn.fc1.weight
primals_12 = self.ffn.fc1.bias
primals_13 = self.ffn.fc2.weight
primals_14 = self.ffn.fc2.bias
primals_3 = input_0
primals_7 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
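# Editor's note -- a hedged parity sketch, not part of the original entry.
# It assumes the eager EncoderLayer from this row's first column shares a
# namespace with EncoderLayerNew, that a CUDA device is available, and that
# the constructor arguments below match the shapes baked into the fused
# kernels (batch 4, sequence length 4, d_model 4, 4 heads); dropout rates
# are zeroed so both paths are deterministic.
import torch
def _check_encoder_parity():
    torch.manual_seed(0)
    kwargs = dict(n_head=4, d_key=4, d_value=4, d_model=4, d_inner_hid=4,
        prepostprocess_dropout=0.0, attention_dropout=0.0, relu_dropout=0.0)
    eager = EncoderLayer(**kwargs).cuda().eval()
    fused = EncoderLayerNew(**kwargs).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    bias = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        torch.testing.assert_close(eager(x, bias), fused(x, bias),
            rtol=0.0001, atol=0.0001)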
|
repo_name: DocYard-ai/UCR | module_name: EncoderLayer | synthetic: false | uuid: 8,026 | licenses: ["Apache-2.0"] | stars: 10 | sha: 7618aa336f56e71d9fd8cdc2d591e3d138e3dc68 | repo_link: https://github.com/DocYard-ai/UCR/tree/7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
entry_point: SpatialLogSoftmax
|
import torch
import torch.utils.data
import torch.random
import torch.nn.functional as F
def logprob_to_keypoints(prob, length):
ruler = torch.log(torch.linspace(0, 1, length, device=prob.device)
).type_as(prob).expand(1, 1, -1)
return torch.sum(torch.exp(prob + ruler), dim=2, keepdim=True).squeeze(2)
def marginal_logsoftmax(heatmap, dim):
marginal = torch.mean(heatmap, dim=dim)
sm = F.log_softmax(marginal, dim=2)
return sm
def spacial_logsoftmax(heatmap, probs=False):
height, width = heatmap.size(2), heatmap.size(3)
hp, wp = marginal_logsoftmax(heatmap, dim=3), marginal_logsoftmax(heatmap,
dim=2)
hk, wk = logprob_to_keypoints(hp, height), logprob_to_keypoints(wp, width)
if probs:
return torch.stack((hk, wk), dim=2), (torch.exp(hp), torch.exp(wp))
else:
return torch.stack((hk, wk), dim=2)
class SpatialLogSoftmax(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, heatmap, probs=False):
return spacial_logsoftmax(heatmap, probs)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.random
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_mean_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = triton_helpers.maximum(tmp17, tmp25)
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = triton_helpers.maximum(tmp26, tmp34)
tmp36 = tmp0 + tmp9
tmp37 = tmp36 + tmp18
tmp38 = tmp37 + tmp27
tmp39 = tmp38 / tmp7
tmp40 = tmp1 + tmp10
tmp41 = tmp40 + tmp19
tmp42 = tmp41 + tmp28
tmp43 = tmp42 / tmp7
tmp44 = triton_helpers.maximum(tmp39, tmp43)
tmp45 = tmp3 + tmp12
tmp46 = tmp45 + tmp21
tmp47 = tmp46 + tmp30
tmp48 = tmp47 / tmp7
tmp49 = triton_helpers.maximum(tmp44, tmp48)
tmp50 = tmp5 + tmp14
tmp51 = tmp50 + tmp23
tmp52 = tmp51 + tmp32
tmp53 = tmp52 / tmp7
tmp54 = triton_helpers.maximum(tmp49, tmp53)
tl.store(out_ptr0 + x0, tmp35, xmask)
tl.store(out_ptr1 + x0, tmp54, xmask)
@triton.jit
def triton_poi_fused__log_softmax_mean_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = 2.0
tmp17 = tmp15 < tmp16
tmp18 = 0.3333333333333333
tmp19 = tmp15 * tmp18
tmp20 = 0.0
tmp21 = tmp19 + tmp20
tmp22 = 3 + -1 * x0
tmp23 = tmp22.to(tl.float32)
tmp24 = tmp23 * tmp18
tmp25 = 1.0
tmp26 = tmp25 - tmp24
tmp27 = tl.where(tmp17, tmp21, tmp26)
tmp28 = tl_math.log(tmp27)
tmp29 = tmp13 + tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
@triton.jit
def triton_poi_fused__log_softmax_mean_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tl.load(in_ptr0 + (3 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp12 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp21 = tl.load(in_ptr1 + 4 * x1, tmp18 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tl_math.exp(tmp21)
tmp23 = tl.load(in_ptr1 + (1 + 4 * x1), tmp18 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tl.load(in_ptr1 + (2 + 4 * x1), tmp18 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl.load(in_ptr1 + (3 + 4 * x1), tmp18 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp18, tmp31, tmp32)
tmp34 = tl.where(tmp4, tmp17, tmp33)
tl.store(out_ptr0 + x2, tmp34, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_mean_0[grid(16)](arg0_1, buf0, buf3,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_mean_1[grid(64)](arg0_1, buf0, buf1,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_add_2[grid(64)](buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused__log_softmax_mean_3[grid(64)](arg0_1, buf3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del buf3
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_add_2[grid(64)](buf4, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_stack_4[grid(32)](buf2, buf5, buf6, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del buf2
del buf5
return buf6,
def logprob_to_keypoints(prob, length):
ruler = torch.log(torch.linspace(0, 1, length, device=prob.device)
).type_as(prob).expand(1, 1, -1)
return torch.sum(torch.exp(prob + ruler), dim=2, keepdim=True).squeeze(2)
def marginal_logsoftmax(heatmap, dim):
marginal = torch.mean(heatmap, dim=dim)
sm = F.log_softmax(marginal, dim=2)
return sm
def spacial_logsoftmax(heatmap, probs=False):
height, width = heatmap.size(2), heatmap.size(3)
hp, wp = marginal_logsoftmax(heatmap, dim=3), marginal_logsoftmax(heatmap,
dim=2)
hk, wk = logprob_to_keypoints(hp, height), logprob_to_keypoints(wp, width)
if probs:
return torch.stack((hk, wk), dim=2), (torch.exp(hp), torch.exp(wp))
else:
return torch.stack((hk, wk), dim=2)
class SpatialLogSoftmaxNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
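# Editor's note -- a smoke-test sketch added for this entry (an assumption,
# not from the source repo): it presumes both definitions above share one
# namespace and that CUDA is present. The soft-argmax keypoints are convex
# combinations of a [0, 1] ruler, so they must stay inside the unit square.
import torch
def _check_spatial_logsoftmax():
    torch.manual_seed(0)
    heatmap = torch.rand(4, 4, 4, 4, device='cuda')
    reference = spacial_logsoftmax(heatmap)
    fused = SpatialLogSoftmaxNew()(heatmap)
    torch.testing.assert_close(reference, fused, rtol=0.0001, atol=1e-05)
    assert fused.shape == (4, 4, 2)
    assert float(fused.min()) >= 0.0 and float(fused.max()) <= 1.0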
|
repo_name: DuaneNielsen/keypoints | module_name: SpatialLogSoftmax | synthetic: false | uuid: 8,027 | licenses: ["MIT"] | stars: 42 | sha: 302fa02966d4372ac9b5aaa3d8dc24684be0b252 | repo_link: https://github.com/DuaneNielsen/keypoints/tree/302fa02966d4372ac9b5aaa3d8dc24684be0b252
|
entry_point: EqualConv2d
|
import math
import torch
from torch import nn
import torch.nn.functional as F
class EqualConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, replicate_pad=False):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_channel, in_channel,
kernel_size, kernel_size))
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
self.replicate_pad = replicate_pad
def forward(self, input):
pad = self.padding
if self.replicate_pad:
input = F.pad(input, (pad, pad, pad, pad), mode='replicate')
pad = 0
out = F.conv2d(input, self.weight * self.scale, bias=self.bias,
stride=self.stride, padding=pad)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_3, buf0
class EqualConv2dNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, replicate_pad=False):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_channel, in_channel,
kernel_size, kernel_size))
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
self.replicate_pad = replicate_pad
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
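# Editor's note -- a hedged check (assumes CUDA and that the eager
# EqualConv2d from this row's first column shares a namespace with the
# compiled class): the constant 0.125 baked into triton_poi_fused_mul_0 is
# the equalized-learning-rate factor 1 / sqrt(in_channel * kernel_size ** 2)
# = 1 / sqrt(4 * 16) specialized at trace time, so eager and fused modules
# should agree once their weights match.
import torch
def _check_equal_conv():
    torch.manual_seed(0)
    eager = EqualConv2d(4, 4, 4).cuda()
    fused = EqualConv2dNew(4, 4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    assert abs(eager.scale - 0.125) < 1e-12
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), fused(x), rtol=0.0001, atol=1e-05)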
|
repo_name: Dolorousrtur/style-people | module_name: EqualConv2d | synthetic: false | uuid: 8,028 | licenses: ["MIT"] | stars: 15 | sha: c48b12b245cc50f8230c0654dffe40016f2a69f1 | repo_link: https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
entry_point: AdaptiveAvgMaxPool2d
|
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
def adaptive_avgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
class AdaptiveAvgMaxPool2d(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_avgmax_pool2d(x, self.output_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_adaptive_max_pool2d_add_mean_mul_0(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp21 = triton_helpers.maximum(tmp20, tmp19)
tmp23 = triton_helpers.maximum(tmp22, tmp21)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp35 = triton_helpers.maximum(tmp34, tmp33)
tmp36 = 16.0
tmp37 = tmp4 / tmp36
tmp38 = tmp37 + tmp35
tmp39 = 0.5
tmp40 = tmp38 * tmp39
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp40, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_adaptive_max_pool2d_add_mean_mul_0[grid(16)](buf2,
arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
def adaptive_avgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
class AdaptiveAvgMaxPool2dNew(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveAvgMaxPool2dNew, self).__init__()
self.output_size = output_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
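# Editor's note -- a minimal check added to this entry (assumes CUDA): the
# single fused kernel folds the mean reduction, the running max, and the
# final 0.5 * (avg + max) into one pass over each 4x4 spatial plane.
import torch
def _check_avgmax_pool():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    fused = AdaptiveAvgMaxPool2dNew()(x)
    torch.testing.assert_close(fused, adaptive_avgmax_pool2d(x, 1))
    assert fused.shape == (4, 4, 1, 1)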
|
repo_name: Alibaba-MIIL/ZS_SDL | module_name: AdaptiveAvgMaxPool2d | synthetic: false | uuid: 8,029 | licenses: ["MIT"] | stars: 20 | sha: 769fe4f57d2d458a7c4b5468a6395c9b296b1dad | repo_link: https://github.com/Alibaba-MIIL/ZS_SDL/tree/769fe4f57d2d458a7c4b5468a6395c9b296b1dad
|
entry_point: ClsHead
|
import torch
from torch import nn
import torch.nn.functional as F
class ClsHead(nn.Module):
"""
    Orientation classification head
    Args:
        in_channels (int): number of input channels
        class_dim (int): number of output classes
"""
def __init__(self, in_channels, class_dim, **kwargs):
super(ClsHead, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, class_dim)
def forward(self, x):
x = self.pool(x)
x = torch.reshape(x, shape=(x.shape[0], x.shape[1]))
x = self.fc(x)
if not self.training:
x = F.softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'class_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
return buf4, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf4
class ClsHeadNew(nn.Module):
"""
    Orientation classification head
    Args:
        in_channels (int): number of input channels
        class_dim (int): number of output classes
"""
def __init__(self, in_channels, class_dim, **kwargs):
super(ClsHeadNew, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, class_dim)
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
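# Editor's note -- a hedged sketch (assumes CUDA and a shared namespace with
# the eager ClsHead in this row's first column): the graph was traced in
# inference mode, so ClsHeadNew always applies the softmax; it therefore
# only matches the eager head under .eval().
import torch
def _check_cls_head():
    torch.manual_seed(0)
    eager = ClsHead(4, 4).cuda().eval()
    fused = ClsHeadNew(4, 4).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    probs = fused(x)
    torch.testing.assert_close(eager(x), probs, rtol=0.0001, atol=1e-05)
    torch.testing.assert_close(probs.sum(dim=1), torch.ones(4, device='cuda'))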
|
repo_name: DocYard-ai/UCR | module_name: ClsHead | synthetic: false | uuid: 8,030 | licenses: ["Apache-2.0"] | stars: 10 | sha: 7618aa336f56e71d9fd8cdc2d591e3d138e3dc68 | repo_link: https://github.com/DocYard-ai/UCR/tree/7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
entry_point: AdaIn
|
from torch.autograd import Function
import math
import torch
from torch import nn
import torch.nn.functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class AdaIn(nn.Module):
def __init__(self, num_features, style_dim, eps=0.0001):
super().__init__()
self.num_features = num_features
self.weight = self.bias = None
self.norm_layer = nn.InstanceNorm2d(num_features, eps=eps, affine=False
)
self.modulation = EqualLinear(style_dim, num_features, bias_init=1)
def forward(self, input, style):
_B, _C, _H, _W = input.shape
out = self.norm_layer(input)
gammas = self.modulation(style)
gammas = gammas[..., None, None]
out = out * gammas
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4, 'style_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 0.0001
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex % 256
x5 = xindex // 16 % 16
x6 = xindex // 16
x1 = xindex // 16 % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x5, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x6, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x7, tmp10, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0[grid(16)](buf3,
primals_1, buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_1[grid(16)](primals_2, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(buf4, (4, 4), (1, 4), 0), out=buf5)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4,
1), torch.float32)
triton_poi_fused_mul_2[grid(4096)](primals_1, buf0, buf3, buf5,
primals_3, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del primals_3
return buf6, primals_1, buf0, buf3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
if input.dtype != bias.dtype:
input = input.type(bias.dtype)
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class AdaInNew(nn.Module):
def __init__(self, num_features, style_dim, eps=0.0001):
super().__init__()
self.num_features = num_features
self.weight = self.bias = None
self.norm_layer = nn.InstanceNorm2d(num_features, eps=eps, affine=False
)
self.modulation = EqualLinear(style_dim, num_features, bias_init=1)
def forward(self, input_0, input_1):
primals_2 = self.modulation.weight
primals_3 = self.modulation.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
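# Editor's note -- an assumption-laden sketch (CUDA required, and the eager
# AdaIn from this row's first column is presumed importable next to the
# compiled class): with the (4, 4, 4, 4) style tensor used at trace time,
# gammas[..., None, None] broadcasts the normalized input up to six
# dimensions, which is why buf6 above is (4, 4, 4, 4, 4, 4).
import torch
def _check_adain():
    torch.manual_seed(0)
    eager = AdaIn(4, 4).cuda()
    fused = AdaInNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    style = torch.rand(4, 4, 4, 4, device='cuda')
    out = fused(x, style)
    torch.testing.assert_close(eager(x, style), out, rtol=0.0001, atol=0.0001)
    assert out.shape == (4, 4, 4, 4, 4, 4)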
|
repo_name: Dolorousrtur/style-people | module_name: AdaIn | synthetic: false | uuid: 8,031 | licenses: ["MIT"] | stars: 15 | sha: c48b12b245cc50f8230c0654dffe40016f2a69f1 | repo_link: https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
|
entry_point: SpatialSoftmax
|
import torch
import torch.utils.data
import torch.random
import torch.nn.functional as F
def marginal_softmax(heatmap, dim):
marginal = torch.mean(heatmap, dim=dim)
sm = F.softmax(marginal, dim=2)
return sm
def prob_to_keypoints(prob, length):
ruler = torch.linspace(0, 1, length).type_as(prob).expand(1, 1, -1)
return torch.sum(prob * ruler, dim=2, keepdim=True).squeeze(2)
def spacial_softmax(heatmap, probs=False):
height, width = heatmap.size(2), heatmap.size(3)
hp, wp = marginal_softmax(heatmap, dim=3), marginal_softmax(heatmap, dim=2)
hk, wk = prob_to_keypoints(hp, height), prob_to_keypoints(wp, width)
if probs:
return torch.stack((hk, wk), dim=2), (hp, wp)
else:
return torch.stack((hk, wk), dim=2)
class SpatialSoftmax(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, heatmap, probs=False):
return spacial_softmax(heatmap, probs)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.random
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_mean_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = triton_helpers.maximum(tmp17, tmp25)
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = triton_helpers.maximum(tmp26, tmp34)
tmp36 = tmp0 + tmp9
tmp37 = tmp36 + tmp18
tmp38 = tmp37 + tmp27
tmp39 = tmp38 / tmp7
tmp40 = tmp1 + tmp10
tmp41 = tmp40 + tmp19
tmp42 = tmp41 + tmp28
tmp43 = tmp42 / tmp7
tmp44 = triton_helpers.maximum(tmp39, tmp43)
tmp45 = tmp3 + tmp12
tmp46 = tmp45 + tmp21
tmp47 = tmp46 + tmp30
tmp48 = tmp47 / tmp7
tmp49 = triton_helpers.maximum(tmp44, tmp48)
tmp50 = tmp5 + tmp14
tmp51 = tmp50 + tmp23
tmp52 = tmp51 + tmp32
tmp53 = tmp52 / tmp7
tmp54 = triton_helpers.maximum(tmp49, tmp53)
tl.store(out_ptr0 + x0, tmp35, xmask)
tl.store(out_ptr1 + x0, tmp54, xmask)
@triton.jit
def triton_poi_fused__softmax_mean_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp8 = 0.0
tmp9 = 2.0
tmp10 = tmp8 < tmp9
tmp11 = tl.where(tmp10, tmp8, tmp8)
tmp12 = tmp7 * tmp11
tmp13 = tmp1 / tmp6
tmp14 = 1.0
tmp15 = tmp14 < tmp9
tmp16 = 0.3333333333333333
tmp17 = 0.33333333333333337
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tmp13 * tmp18
tmp20 = tmp12 + tmp19
tmp21 = tmp3 / tmp6
tmp22 = tmp9 < tmp9
tmp23 = 0.6666666666666666
tmp24 = 0.6666666666666667
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tmp21 * tmp25
tmp27 = tmp20 + tmp26
tmp28 = tmp5 / tmp6
tmp29 = 3.0
tmp30 = tmp29 < tmp9
tmp31 = tl.where(tmp30, tmp14, tmp14)
tmp32 = tmp28 * tmp31
tmp33 = tmp27 + tmp32
tl.store(out_ptr0 + x0, tmp33, xmask)
@triton.jit
def triton_poi_fused__softmax_mean_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = tl.load(in_ptr1 + x1, tmp6 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mean_0[grid(16)](arg0_1, buf0, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_mean_1[grid(64)](arg0_1, buf0, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0)
del buf0
triton_poi_fused__softmax_mul_sum_2[grid(16)](buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused__softmax_mean_3[grid(64)](arg0_1, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0)
del buf3
triton_poi_fused__softmax_mul_sum_2[grid(16)](buf4, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_stack_4[grid(32)](buf2, buf5, buf6, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del buf2
del buf5
return buf6,
def marginal_softmax(heatmap, dim):
marginal = torch.mean(heatmap, dim=dim)
sm = F.softmax(marginal, dim=2)
return sm
def prob_to_keypoints(prob, length):
ruler = torch.linspace(0, 1, length).type_as(prob).expand(1, 1, -1)
return torch.sum(prob * ruler, dim=2, keepdim=True).squeeze(2)
def spacial_softmax(heatmap, probs=False):
height, width = heatmap.size(2), heatmap.size(3)
hp, wp = marginal_softmax(heatmap, dim=3), marginal_softmax(heatmap, dim=2)
hk, wk = prob_to_keypoints(hp, height), prob_to_keypoints(wp, width)
if probs:
return torch.stack((hk, wk), dim=2), (hp, wp)
else:
return torch.stack((hk, wk), dim=2)
class SpatialSoftmaxNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
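# Editor's note -- a smoke-test sketch added for this entry (assumes CUDA
# and a shared namespace with the eager helpers redefined above): each
# keypoint coordinate is a probability-weighted average of a [0, 1] ruler,
# one per marginal axis.
import torch
def _check_spatial_softmax():
    torch.manual_seed(0)
    heatmap = torch.rand(4, 4, 4, 4, device='cuda')
    keypoints = SpatialSoftmaxNew()(heatmap)
    torch.testing.assert_close(spacial_softmax(heatmap), keypoints,
        rtol=0.0001, atol=1e-05)
    assert keypoints.shape == (4, 4, 2)
    assert float(keypoints.min()) >= 0.0 and float(keypoints.max()) <= 1.0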
|
repo_name: DuaneNielsen/keypoints | module_name: SpatialSoftmax | synthetic: false | uuid: 8,032 | licenses: ["MIT"] | stars: 42 | sha: 302fa02966d4372ac9b5aaa3d8dc24684be0b252 | repo_link: https://github.com/DuaneNielsen/keypoints/tree/302fa02966d4372ac9b5aaa3d8dc24684be0b252
|
entry_point: GRUCell
|
import torch
import torch.nn as nn
import torch.utils.checkpoint
class GRUCell(nn.Module):
def __init__(self, x_dim, h_dim):
super(GRUCell, self).__init__()
self.r = nn.Linear(x_dim + h_dim, h_dim, True)
self.z = nn.Linear(x_dim + h_dim, h_dim, True)
self.c = nn.Linear(x_dim, h_dim, True)
self.u = nn.Linear(h_dim, h_dim, True)
def forward(self, x, h):
rz_input = torch.cat((x, h), -1)
r = torch.sigmoid(self.r(rz_input))
z = torch.sigmoid(self.z(rz_input))
u = torch.tanh(self.c(x) + r * self.u(h))
new_h = z * h + (1 - z) * u
return new_h
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'x_dim': 4, 'h_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp7 = tl.load(in_ptr3 + x0, xmask)
tmp9 = tl.load(in_ptr4 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 + tmp10
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp5 * tmp12
tmp14 = tmp3 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf3)
del primals_7
del primals_8
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(primals_2, (64,
4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf4)
del primals_10
del primals_9
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(256)](buf2,
primals_2, buf3, buf1, buf4, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
return buf5, primals_2, reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf1, buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf3, buf4
class GRUCellNew(nn.Module):
def __init__(self, x_dim, h_dim):
super(GRUCellNew, self).__init__()
self.r = nn.Linear(x_dim + h_dim, h_dim, True)
self.z = nn.Linear(x_dim + h_dim, h_dim, True)
self.c = nn.Linear(x_dim, h_dim, True)
self.u = nn.Linear(h_dim, h_dim, True)
def forward(self, input_0, input_1):
primals_3 = self.r.weight
primals_4 = self.r.bias
primals_5 = self.z.weight
primals_6 = self.z.bias
primals_7 = self.c.weight
primals_8 = self.c.bias
primals_9 = self.u.weight
primals_10 = self.u.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
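# Editor's note -- a hedged parity sketch (assumes CUDA and that the eager
# GRUCell from this row's first column shares a namespace): the two kernels
# jointly compute r = sigmoid(W_r [x; h]), z = sigmoid(W_z [x; h]) and
# new_h = z * h + (1 - z) * tanh(W_c x + r * W_u h), i.e. exactly the eager
# cell's update rule.
import torch
def _check_gru_cell():
    torch.manual_seed(0)
    eager = GRUCell(4, 4).cuda()
    fused = GRUCellNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    h = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x, h), fused(x, h), rtol=0.0001,
        atol=1e-05)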
|
repo_name: DeepLearnXMU/IRSEG | module_name: GRUCell | synthetic: false | uuid: 8,033 | licenses: ["Apache-2.0"] | stars: 14 | sha: 6027c9601dbcb626d4adaf429c4bed07febb1034 | repo_link: https://github.com/DeepLearnXMU/IRSEG/tree/6027c9601dbcb626d4adaf429c4bed07febb1034
|
entry_point: GroupNorm
|
import torch
import torch.nn as nn
class GroupNorm(nn.Module):
def __init__(self, c_num, group_num=16, eps=1e-10):
"""
The groupnorm layer from https://arxiv.org/abs/1803.08494
Args:
c_num (int): Number of input channels
            group_num (int): Number of groups into which to divide the channels
eps (float): Epsilon
"""
super(GroupNorm, self).__init__()
self.group_num = group_num
self.gamma = nn.Parameter(torch.ones(c_num, 1, 1))
self.beta = nn.Parameter(torch.zeros(c_num, 1, 1))
self.eps = eps
def forward(self, x):
batch_size, channels, height, width = x.size()
x = x.view(batch_size, self.group_num, -1)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x = (x - mean) / (std + self.eps)
x = x.view(batch_size, channels, height, width)
return x * self.gamma + self.beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c_num': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = xindex // 4
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-10
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class GroupNormNew(nn.Module):
def __init__(self, c_num, group_num=16, eps=1e-10):
"""
The groupnorm layer from https://arxiv.org/abs/1803.08494
Args:
c_num (int): Number of input channels
        group_num (int): Number of groups into which to divide the channels
eps (float): Epsilon
"""
super(GroupNormNew, self).__init__()
self.group_num = group_num
self.gamma = nn.Parameter(torch.ones(c_num, 1, 1))
self.beta = nn.Parameter(torch.zeros(c_num, 1, 1))
self.eps = eps
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
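A minimal usage sketch for the compiled module (editorial addition; assumes a CUDA device, with shapes taken from get_inputs/get_init_inputs above):
# Hypothetical smoke test for the Triton-fused GroupNorm.
norm = GroupNormNew(c_num=4).cuda()
y = norm(torch.rand(4, 4, 4, 4, device='cuda'))
assert y.shape == (4, 4, 4, 4)  # normalized per group, scaled by gamma, shifted by beta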
| EKami/EzeeML | GroupNorm | false | 8,034 | ["MIT"] | 35 | 21753a0ede7cc1dc675a2dcd09b6306cea2cad56 | https://github.com/EKami/EzeeML/tree/21753a0ede7cc1dc675a2dcd09b6306cea2cad56 |

| CTCHead |
import torch
from torch import nn
class CTCHead(nn.Module):
def __init__(self, in_channels, out_channels, fc_decay=0.0004, **kwargs):
super(CTCHead, self).__init__()
self.fc = nn.Linear(in_channels, out_channels)
self.softmax = nn.Softmax(dim=2)
self.out_channels = out_channels
def forward(self, x, labels=None):
predicts = self.fc(x)
if not self.training:
predicts = self.softmax(predicts)
return predicts
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class CTCHeadNew(nn.Module):
def __init__(self, in_channels, out_channels, fc_decay=0.0004, **kwargs):
super(CTCHeadNew, self).__init__()
self.fc = nn.Linear(in_channels, out_channels)
self.softmax = nn.Softmax(dim=2)
self.out_channels = out_channels
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
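A minimal usage sketch (editorial addition; CUDA assumed, shapes from get_inputs/get_init_inputs). Note that the `not self.training` branch was traced here, so this compiled graph always applies the softmax:
# Hypothetical smoke test.
head = CTCHeadNew(in_channels=4, out_channels=4).cuda()
probs = head(torch.rand(4, 4, 4, 4, device='cuda'))
assert probs.shape == (4, 4, 4, 4)  # softmax-normalized along dim=2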
| DocYard-ai/UCR | CTCHead | false | 8,035 | ["Apache-2.0"] | 10 | 7618aa336f56e71d9fd8cdc2d591e3d138e3dc68 | https://github.com/DocYard-ai/UCR/tree/7618aa336f56e71d9fd8cdc2d591e3d138e3dc68 |

| FlowfieldDiscountedLoss |
import torch
import torch.nn as nn
import torch.utils.data
import torch.random
import torch.nn.functional as F
class FlowfieldDiscountedLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, source, target, flowfield):
loss = F.mse_loss(source, target, reduction='none')
bad_bits = flowfield ** 2
bad_bits[bad_bits <= 1.0] = 1.0
bad_bits[bad_bits > 1.0] = 0
mask = torch.prod(bad_bits, 3).expand(1, -1, -1, -1).permute(1, 0, 2, 3
)
loss = loss * mask
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.random
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_put_lift_fresh_pow_0(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 <= tmp2
tmp4 = tl.where(tmp3, tmp2, tmp1)
tmp5 = tmp4 > tmp2
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp6, tmp4)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_per_fused_mse_loss_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr1 + r3, None)
tmp4 = tl.load(in_ptr2 + (4 * r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr2 + (1 + 4 * r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr2 + (2 + 4 * r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + (3 + 4 * r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 * tmp9
tmp11 = tmp3 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_pow_0[grid(256)](buf1, arg2_1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_mse_loss_mul_sum_1[grid(1)](arg1_1, arg0_1, buf1,
buf2, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del buf1
return buf2,
class FlowfieldDiscountedLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
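A minimal usage sketch (editorial addition; CUDA assumed, shapes from get_inputs):
# Hypothetical smoke test for the fused masked-MSE reduction.
loss_fn = FlowfieldDiscountedLossNew()
src, tgt, flow = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
loss = loss_fn(src, tgt, flow)  # 0-dim tensor: masked, summed MSE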
| DuaneNielsen/keypoints | FlowfieldDiscountedLoss | false | 8,036 | ["MIT"] | 42 | 302fa02966d4372ac9b5aaa3d8dc24684be0b252 | https://github.com/DuaneNielsen/keypoints/tree/302fa02966d4372ac9b5aaa3d8dc24684be0b252 |

| Coords |
import torch
import torch.nn as nn
import torch.utils.data
import torch.random
class Coords(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
""" adds 2 channels that carry co-ordinate information """
b, h, w = x.size(0), x.size(2), x.size(3)
hm = torch.linspace(0, 1, h, dtype=x.dtype, device=x.device).reshape(
1, 1, h, 1).repeat(b, 1, 1, w)
wm = torch.linspace(0, 1, w, dtype=x.dtype, device=x.device).reshape(
1, 1, 1, w).repeat(b, 1, h, 1)
return torch.cat((x, hm, wm), dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.random
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 6
x3 = xindex // 96
x4 = xindex % 16
x1 = xindex // 4 % 4
x0 = xindex % 4
x7 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x1
tmp11 = tmp10.to(tl.float32)
tmp12 = 2.0
tmp13 = tmp11 < tmp12
tmp14 = 0.3333333333333333
tmp15 = tmp11 * tmp14
tmp16 = 0.0
tmp17 = tmp15 + tmp16
tmp18 = 3 + -1 * x1
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp19 * tmp14
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tmp23 = tl.where(tmp13, tmp17, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp9, tmp23, tmp24)
tmp26 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp29 = x0
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp30 < tmp12
tmp32 = tmp30 * tmp14
tmp33 = tmp32 + tmp16
tmp34 = 3 + -1 * x0
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp35 * tmp14
tmp37 = tmp21 - tmp36
tmp38 = tl.where(tmp31, tmp33, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp26, tmp38, tmp39)
tmp41 = tl.where(tmp9, tmp25, tmp40)
tmp42 = tl.where(tmp4, tmp5, tmp41)
tl.store(out_ptr0 + x7, tmp42, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CoordsNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
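A minimal usage sketch (editorial addition; CUDA assumed). The fused cat kernel computes and appends the two coordinate channels in one pass, so a 4-channel input becomes 6 channels:
# Hypothetical smoke test.
coords = CoordsNew()
y = coords(torch.rand(4, 4, 4, 4, device='cuda'))
assert y.shape == (4, 6, 4, 4)  # original channels + h and w coordinate maps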
| DuaneNielsen/keypoints | Coords | false | 8,037 | ["MIT"] | 42 | 302fa02966d4372ac9b5aaa3d8dc24684be0b252 | https://github.com/DuaneNielsen/keypoints/tree/302fa02966d4372ac9b5aaa3d8dc24684be0b252 |

| ContrastiveLoss |
import torch
import torch.nn as nn
from torch.nn import functional as F
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
    Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise
"""
def __init__(self, margin):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, output1, output2, target, size_average=True):
distances = (output2 - output1).pow(2).sum(1)
losses = 0.5 * (target.float() * distances + (1 + -1 * target).
float() * F.relu(self.margin - (distances + self.eps).sqrt()).
pow(2))
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp0 * tmp3
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = 1e-09
tmp8 = tmp1 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 4.0
tmp11 = tmp10 - tmp9
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tmp13 * tmp13
tmp15 = tmp6 * tmp14
tmp16 = tmp2 + tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
Contrastive loss
    Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise
"""
def __init__(self, margin):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
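A minimal usage sketch (editorial addition; CUDA assumed, margin and shapes from get_init_inputs/get_inputs). The trace bakes in size_average=True, so a scalar mean is returned:
# Hypothetical smoke test.
loss_fn = ContrastiveLossNew(margin=4)
out1, out2, target = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
loss = loss_fn(out1, out2, target)  # 0-dim tensor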
| EmmaW8/EISNet | ContrastiveLoss | false | 8,038 | ["MIT"] | 40 | 97c420d6763c5f825e88ed732edee4e75f3b947e | https://github.com/EmmaW8/EISNet/tree/97c420d6763c5f825e88ed732edee4e75f3b947e |

| GlobalAveragePool |
import torch
from torch import nn
class GlobalAveragePool(nn.Module):
"""
Average pooling in an equivariant network
"""
def __init__(self):
"""
"""
super().__init__()
def forward(self, x):
"""
"""
avg = torch.mean(x, dim=[-2, -1], keepdim=True)
return avg
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GlobalAveragePoolNew(nn.Module):
"""
Average pooling in an equivariant network
"""
def __init__(self):
"""
"""
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
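A minimal usage sketch (editorial addition; CUDA assumed):
# Hypothetical smoke test for the fused spatial mean.
pool = GlobalAveragePoolNew()
y = pool(torch.rand(4, 4, 4, 4, device='cuda'))
assert y.shape == (4, 4, 1, 1)  # mean over the spatial dims, keepdim=True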
| ElisevanderPol/symmetrizer | GlobalAveragePool | false | 8,039 | ["MIT"] | 16 | 8dae02bee2ba7132ae4fb07e07020767d280842c | https://github.com/ElisevanderPol/symmetrizer/tree/8dae02bee2ba7132ae4fb07e07020767d280842c |

| TripletLoss |
import torch
import torch.nn as nn
from torch.nn import functional as F
class TripletLoss(nn.Module):
"""
Triplet loss
    Takes embeddings of an anchor sample, a positive sample, and a negative sample
"""
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 4.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLossNew(nn.Module):
"""
Triplet loss
    Takes embeddings of an anchor sample, a positive sample, and a negative sample
"""
def __init__(self, margin):
super(TripletLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
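A minimal usage sketch (editorial addition; CUDA assumed; margin=4 and the reduction to a scalar mean are baked into the single fused kernel above):
# Hypothetical smoke test.
loss_fn = TripletLossNew(margin=4)
anchor, pos, neg = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
loss = loss_fn(anchor, pos, neg)  # 0-dim tensor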
| EmmaW8/EISNet | TripletLoss | false | 8,040 | ["MIT"] | 40 | 97c420d6763c5f825e88ed732edee4e75f3b947e | https://github.com/EmmaW8/EISNet/tree/97c420d6763c5f825e88ed732edee4e75f3b947e |

| AdaptiveCatAvgMaxPool2d |
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
def adaptive_catavgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
class AdaptiveCatAvgMaxPool2d(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveCatAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_catavgmax_pool2d(x, self.output_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4)
get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0)
triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf3,
def adaptive_catavgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
class AdaptiveCatAvgMaxPool2dNew(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveCatAvgMaxPool2dNew, self).__init__()
self.output_size = output_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
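A minimal usage sketch (editorial addition; CUDA assumed). The two kernels write the avg- and max-pooled halves directly into one 8-channel output buffer, avoiding a separate cat:
# Hypothetical smoke test.
pool = AdaptiveCatAvgMaxPool2dNew()
y = pool(torch.rand(4, 4, 4, 4, device='cuda'))
assert y.shape == (4, 8, 1, 1)  # [avg | max] concatenated along channels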
| Alibaba-MIIL/ZS_SDL | AdaptiveCatAvgMaxPool2d | false | 8,041 | ["MIT"] | 20 | 769fe4f57d2d458a7c4b5468a6395c9b296b1dad | https://github.com/Alibaba-MIIL/ZS_SDL/tree/769fe4f57d2d458a7c4b5468a6395c9b296b1dad |

| FocalLoss |
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, logit, target):
target = target.float()
max_val = (-logit).clamp(min=0)
loss = logit - logit * target + max_val + ((-max_val).exp() + (-
logit - max_val).exp()).log()
invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
if len(loss.size()) == 2:
loss = loss.sum(dim=1)
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = tmp14 * tmp3
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp0 * tmp2
tmp18 = tmp0 - tmp17
tmp19 = triton_helpers.maximum(tmp1, tmp8)
tmp20 = tmp18 + tmp19
tmp21 = -tmp19
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp1 - tmp19
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tl_math.log(tmp25)
tmp27 = tmp20 + tmp26
tmp28 = tmp16 * tmp27
tmp29 = tl.broadcast_to(tmp28, [RBLOCK])
tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0))
tmp32 = 256.0
tmp33 = tmp31 / tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0[
grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLossNew(nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
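A minimal usage sketch (editorial addition; CUDA assumed; gamma=2 is baked into the kernel as the multiplier on the log-sigmoid term):
# Hypothetical smoke test; both inputs follow get_inputs.
loss_fn = FocalLossNew()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
loss = loss_fn(a, b)  # 0-dim tensor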
| EMUNES/Auto-Subtitle-File-Generation | FocalLoss | false | 8,042 | ["Apache-2.0"] | 33 | 535a6351f450b1970da50bbbf4cc6d2f442ec335 | https://github.com/EMUNES/Auto-Subtitle-File-Generation/tree/535a6351f450b1970da50bbbf4cc6d2f442ec335 |

| Conv2d |
import torch
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL
='relu', same_padding=False, bn=False, dilation=1):
super(Conv2d, self).__init__()
padding = int((kernel_size - 1) / 2) if same_padding else 0
self.conv = []
if dilation == 1:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding, dilation=dilation)
else:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=dilation, dilation=dilation)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
else:
self.relu = None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1,
primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class Conv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL
='relu', same_padding=False, bn=False, dilation=1):
super(Conv2dNew, self).__init__()
padding = int((kernel_size - 1) / 2) if same_padding else 0
self.conv = []
if dilation == 1:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding, dilation=dilation)
else:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=dilation, dilation=dilation)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
else:
self.relu = None
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
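A minimal usage sketch (editorial addition; CUDA assumed). With kernel_size=4, the default same_padding=False (so padding=0) and stride 1, the 4x4 input collapses to 1x1:
# Hypothetical smoke test.
conv = Conv2dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
y = conv(torch.rand(4, 4, 4, 4, device='cuda'))
assert y.shape == (4, 4, 1, 1)  # convolution + fused bias-add and ReLU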
| Elin24/DCL-CrowdCounting | Conv2d | false | 8,043 | ["MIT"] | 12 | 2f8e68a2d29a8599e795b502f21b4de778e6214c | https://github.com/Elin24/DCL-CrowdCounting/tree/2f8e68a2d29a8599e795b502f21b4de778e6214c |

| NoiseInjection |
import torch
from torch import nn
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = torch.ops.aten.normal_functional.default(buf0)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf2,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf3, buf2
class NoiseInjectionNew(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
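A minimal usage sketch (editorial addition; CUDA assumed). This trace covers only the noise=None path: a (B, 1, H, W) noise tensor is sampled internally via aten.normal_functional:
# Hypothetical smoke test.
inject = NoiseInjectionNew().cuda()
img = torch.rand(4, 4, 4, 4, device='cuda')
out = inject(img)  # img + weight * noise; weight starts at zero
assert torch.allclose(out, img)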
| Dolorousrtur/style-people | NoiseInjection | false | 8,044 | ["MIT"] | 15 | c48b12b245cc50f8230c0654dffe40016f2a69f1 | https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1 |

| DiceLoss |
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
pass
def forward(self, input, target):
input = input.contiguous().view(input.size()[0], -1)
target = target.contiguous().view(target.size()[0], -1).float()
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + 0.001
c = torch.sum(target * target, 1) + 0.001
d = 2 * a / (b + c)
return 1 - d
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tmp1 * tmp1
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 2.0
tmp18 = tmp6 * tmp17
tmp19 = 0.001
tmp20 = tmp11 + tmp19
tmp21 = tmp16 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp18 / tmp22
tmp24 = 1.0
tmp25 = tmp24 - tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp25, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(4)](buf3, arg0_1,
arg1_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
pass
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
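A minimal usage sketch (editorial addition; CUDA assumed). Unlike most losses in this dump, this one returns a per-sample vector (1 - dice for each of the 4 batch elements) rather than a scalar:
# Hypothetical smoke test.
loss_fn = DiceLossNew()
pred = torch.rand(4, 4, 4, 4, device='cuda')
gt = torch.rand(4, 4, 4, 4, device='cuda')
loss = loss_fn(pred, gt)
assert loss.shape == (4,)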
| EryiXie/PlaneRecNet | DiceLoss | false | 8,045 | ["MIT"] | 34 | 534e23e6c5db2235ab1e5a9419fb4bfec3ffa943 | https://github.com/EryiXie/PlaneRecNet/tree/534e23e6c5db2235ab1e5a9419fb4bfec3ffa943 |

| APPNP |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class APPNP(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, K, alpha):
super(APPNP, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=False)
self.Linear2 = Linear(nhid, nclass, dropout, bias=False)
self.alpha = alpha
self.K = K
def forward(self, x, adj):
x = torch.relu(self.Linear1(x))
h0 = self.Linear2(x)
h = h0
for _ in range(self.K):
h = (1 - self.alpha) * torch.matmul(adj, h) + self.alpha * h0
return torch.log_softmax(h, dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5, 'K': 4,
'alpha': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_view_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -3.0
tmp2 = tmp0 * tmp1
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_mul_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = -3.0
tmp2 = tmp0 * tmp1
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp8 = tmp7 * tmp1
tmp10 = tmp9 * tmp4
tmp11 = tmp8 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp14 = tmp13 * tmp1
tmp16 = tmp15 * tmp4
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp20 = tmp19 * tmp1
tmp22 = tmp21 * tmp4
tmp23 = tmp20 + tmp22
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tl.store(out_ptr0 + x0, tmp24, xmask)
tl.store(out_ptr1 + x0, tmp35, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_mul_3(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = -3.0
tmp2 = tmp0 * tmp1
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp8 = tmp6 - tmp7
tmp10 = tl_math.log(tmp9)
tmp11 = tmp8 - tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_view_0[grid(256)](buf0,
buf1, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = buf0
del buf0
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_mul_1[grid(256)](buf4, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0),
out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_mul_1[grid(256)](buf6, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0),
out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_add_mul_1[grid(256)](buf8, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0)
del buf6
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0),
out=buf9)
del buf8
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__log_softmax_add_mul_2[grid(64)](buf9, buf2, buf10,
buf11, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_add_mul_3[grid(256)](buf12, buf9,
buf10, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf10
del buf11
del buf9
return buf12, buf12, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), buf13, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
class Linear(nn.Module):
def __init__(self, in_features, out_features, dropout, bias=False):
super(Linear, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.randn(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.randn(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))
if self.bias is not None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
input = F.dropout(input, self.dropout, training=self.training)
output = torch.matmul(input, self.weight)
if self.bias is not None:
return output + self.bias
else:
return output
class APPNPNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, K, alpha):
super(APPNPNew, self).__init__()
self.Linear1 = Linear(nfeat, nhid, dropout, bias=False)
self.Linear2 = Linear(nhid, nclass, dropout, bias=False)
self.alpha = alpha
self.K = K
def forward(self, input_0, input_1):
primals_2 = self.Linear1.weight
primals_3 = self.Linear2.weight
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
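A minimal usage sketch (editorial addition; CUDA assumed, hyperparameters from get_init_inputs). Note how specialized this trace is: K=4 appears as four unrolled bmm/add-mul rounds in call, and alpha=4 is folded into the kernel constants (1 - alpha = -3.0, alpha = 4.0):
# Hypothetical smoke test.
model = APPNPNew(nfeat=4, nhid=4, nclass=4, dropout=0.5, K=4, alpha=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
adj = torch.rand(4, 4, 4, 4, device='cuda')
log_probs = model(x, adj)  # log-softmax over the last dim, shape (4, 4, 4, 4)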
| DongHande/PT_propagation_then_training | APPNP | false | 8,046 | ["MIT"] | 21 | 3f346ff161d2a0b807e3c0269ad26a7266305cc3 | https://github.com/DongHande/PT_propagation_then_training/tree/3f346ff161d2a0b807e3c0269ad26a7266305cc3 |

| GatedConv1d |
import torch
import torch.nn as nn
import torch.nn
class GatedConv1d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding=0, dilation=1, activation=None):
super(GatedConv1d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv1d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv1d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4, 'kernel_size':
4, 'stride': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_out_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(in_out_ptr1 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 1), (4, 1, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 1), (4, 1, 1))
buf1 = buf0
del buf0
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(4)](buf1, buf3,
primals_2, primals_5, buf4, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_2
del primals_5
return buf4, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4,
4), (16, 4, 1), 0), buf1, buf3
class GatedConv1dNew(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding=0, dilation=1, activation=None):
super(GatedConv1dNew, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv1d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv1d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, input_0):
primals_1 = self.h.weight
primals_2 = self.h.bias
primals_4 = self.g.weight
primals_5 = self.g.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
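A minimal usage sketch (editorial addition; CUDA assumed). The (4, 4) input from get_inputs is viewed by call as a single batch of 4 channels x 4 steps, and the kernel_size-4 convolution leaves one output step:
# Hypothetical smoke test.
gate = GatedConv1dNew(input_channels=4, output_channels=4, kernel_size=4, stride=1).cuda()
y = gate(torch.rand(4, 4, device='cuda'))
assert y.shape == (4, 1)  # h(x) * sigmoid(g(x)), with both bias-adds fused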
| EmilSkaaning/DeepStruc | GatedConv1d | false | 8,047 | ["Apache-2.0"] | 11 | 4de0233caba11523b8f5deead53e1c70c05b346b | https://github.com/EmilSkaaning/DeepStruc/tree/4de0233caba11523b8f5deead53e1c70c05b346b |

| Hsigmoid |
import torch
import torch.nn as nn
import torch.quantization
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.float_op = nn.quantized.FloatFunctional()
self.relu6 = nn.ReLU6(inplace=inplace)
def forward(self, x):
relu6 = self.relu6(self.float_op.add_scalar(x, 3.0))
return self.float_op.mul_scalar(relu6, 1 / 6.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.quantization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HsigmoidNew(nn.Module):
def __init__(self, inplace=True):
super(HsigmoidNew, self).__init__()
self.float_op = nn.quantized.FloatFunctional()
self.relu6 = nn.ReLU6(inplace=inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Edgecortix-Inc/pytorch_quantization
|
Hsigmoid
| false
| 8,048
|
[
"Apache-2.0"
] | 13
|
ad7120439f473d539adec22930a8363bfb63e830
|
https://github.com/Edgecortix-Inc/pytorch_quantization/tree/ad7120439f473d539adec22930a8363bfb63e830
|
FocalLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
def focal_loss(input_values, gamma):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class FocalLoss(nn.Module):
def __init__(self, weight=None, gamma=0.0):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input, target):
return focal_loss(F.cross_entropy(input, target, reduction='none',
weight=self.weight), self.gamma)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
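# Hedged sanity check (not from the source repo): with gamma=0 the modulating
# factor (1 - p) ** gamma is 1, so FocalLoss reduces to mean cross-entropy.
# Integer class targets are used here instead of the random float targets
# from get_inputs().
import torch
import torch.nn.functional as F
_logits = torch.randn(8, 5)
_target = torch.randint(0, 5, (8,))
assert torch.allclose(FocalLoss(gamma=0.0)(_logits, _target),
    F.cross_entropy(_logits, _target))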
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
    tmp30 - tmp29  # dead expression: (1 - p) ** gamma const-folds to 1 for gamma == 0
tmp32 = tmp30 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 64.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)](
buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
def focal_loss(input_values, gamma):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class FocalLossNew(nn.Module):
def __init__(self, weight=None, gamma=0.0):
super(FocalLossNew, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EricZsy/BalancedKnowledgeDistillation
|
FocalLoss
| false
| 8,049
|
[
"MIT"
] | 22
|
88a2de840a3fc6eb2ee881c729f293b8e78714aa
|
https://github.com/EricZsy/BalancedKnowledgeDistillation/tree/88a2de840a3fc6eb2ee881c729f293b8e78714aa
|
CharbonnierLoss
|
import torch
import torch.nn as nn
class CharbonnierLoss(nn.Module):
def __init__(self):
"""
        L1 Charbonnier loss.
"""
super(CharbonnierLoss, self).__init__()
def forward(self, x, y, eps=1e-06):
diff = y - x
error = torch.sqrt(diff * diff + eps)
loss = torch.mean(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
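# Hedged sanity check (not from the source repo): sqrt(diff^2 + eps) is a
# smooth approximation of |diff|, so with eps=1e-06 the loss sits within
# sqrt(eps)=1e-03 of the mean absolute error.
import torch
_x, _y = torch.randn(4, 3, 8, 8), torch.randn(4, 3, 8, 8)
assert torch.allclose(CharbonnierLoss()(_x, _y), (_y - _x).abs().mean(),
    atol=1e-03)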
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_sqrt_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class CharbonnierLossNew(nn.Module):
def __init__(self):
"""
        L1 Charbonnier loss.
"""
super(CharbonnierLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EKami/EzeeML
|
CharbonnierLoss
| false
| 8,050
|
[
"MIT"
] | 35
|
21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
https://github.com/EKami/EzeeML/tree/21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
Hflip
|
import torch
from torch import nn
def hflip(input: 'torch.Tensor') ->torch.Tensor:
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
"""
return torch.flip(input, [-1])
class Hflip(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.hflip(input)
tensor([[[0, 0, 0],
[0, 0, 0],
[1, 1, 0]]])
"""
def __init__(self) ->None:
super(Hflip, self).__init__()
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return hflip(input)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
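# Hedged sanity check (not from the source repo): hflip reverses only the
# last (width) axis, so it is an involution and the first column of the
# result is the last column of the input.
import torch
_img = torch.rand(2, 3, 5, 7)
assert torch.equal(hflip(hflip(_img)), _img)
assert torch.equal(hflip(_img)[..., 0], _img[..., -1])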
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (3 + -1 * x0 + 4 * x1), xmask, eviction_policy
='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_flip_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def hflip(input: 'torch.Tensor') ->torch.Tensor:
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
"""
return torch.flip(input, [-1])
class HflipNew(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.hflip(input)
tensor([[[0, 0, 0],
[0, 0, 0],
[1, 1, 0]]])
"""
def __init__(self) ->None:
super(HflipNew, self).__init__()
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ElementAI/bilevel_augment
|
Hflip
| false
| 8,051
|
[
"Apache-2.0"
] | 22
|
b43997d41d8452d362450e267503c8be18f1be4a
|
https://github.com/ElementAI/bilevel_augment/tree/b43997d41d8452d362450e267503c8be18f1be4a
|
ResidualBlock
|
import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.in1 = nn.InstanceNorm2d(channels)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.in2 = nn.InstanceNorm2d(channels)
def forward(self, x):
residual = self.conv1(x)
residual = self.in1(residual)
residual = self.prelu(residual)
residual = self.conv2(residual)
residual = self.in2(residual)
return x + residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
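# Hedged sanity check (not from the source repo): every layer in the block is
# shape-preserving (3x3 convs with padding=1, per-sample norms), so the skip
# connection x + residual is well-defined for any spatial size.
import torch
_block = ResidualBlock(channels=8)
_x = torch.rand(2, 8, 16, 16)
assert _block(_x).shape == _x.shape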
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + 0)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp30 = tmp29 * tmp25
tmp31 = tl.where(tmp27, tmp25, tmp30)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp31, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp20 = tmp2 - tmp12
tmp21 = 16.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp19 + tmp26
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask)
tl.store(out_ptr3 + x3, tmp25, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0[
grid(16)](buf1, buf5, primals_2, primals_4, buf2, buf6, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_1[grid(16)](
buf8, primals_6, primals_3, buf9, buf13, buf12, 16, 16, XBLOCK=
1, num_warps=2, num_stages=1)
del primals_6
return (buf13, primals_1, primals_3, primals_4, primals_5, buf1, buf2,
buf5, buf6, buf8, reinterpret_tensor(buf12, (16,), (1,), 0),
reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ResidualBlockNew(nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.in1 = nn.InstanceNorm2d(channels)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.in2 = nn.InstanceNorm2d(channels)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.prelu.weight
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
EKami/EzeeML
|
ResidualBlock
| false
| 8,052
|
[
"MIT"
] | 35
|
21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
https://github.com/EKami/EzeeML/tree/21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
DotProduct
|
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
class DotProduct(nn.Module):
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
"""
Inputs:
x - (N, F)
y - (N, F)
Output:
output - (N, 1) dot-product output
"""
assert len(x.shape) == 2
assert len(y.shape) == 2
assert x.shape == y.shape
x = nn.functional.normalize(x, dim=1)
y = nn.functional.normalize(y, dim=1)
output = torch.matmul(x.unsqueeze(1), y.unsqueeze(2)).squeeze(2)
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
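# Hedged sanity check (not from the source repo): after L2-normalizing both
# inputs, the batched matmul is exactly the row-wise cosine similarity.
import torch
import torch.nn as nn
_x, _y = torch.randn(6, 10), torch.randn(6, 10)
_out = DotProduct()(_x, _y)  # shape (6, 1)
_cos = nn.functional.cosine_similarity(_x, _y, dim=1)
assert torch.allclose(_out.squeeze(1), _cos, atol=1e-06)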
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (4, 0, 1), 0
), reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 0), 0), out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 1), (1, 1), 0),
class DotProductNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EGO4D/episodic-memory
|
DotProduct
| false
| 8,053
|
[
"MIT"
] | 27
|
2a3464882cd4f665c358c1b05a6397339e33c2e1
|
https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1
|
InvGridSamplerNumerator
|
import torch
import numpy as np
import torch.nn.functional as F
from torch import nn
import torch.utils.data
def ravel_multi_index(indices, shape):
indices_ravel = indices[0]
for i in range(1, len(indices)):
indices_ravel = indices_ravel * shape[i] + indices[i]
return indices_ravel
def add_repeated(t, indices, values):
shape = t.shape
t_ravel = t.view(t.numel())
indices_ravel = ravel_multi_index(indices, shape)
t_ravel.index_add_(0, indices_ravel, values)
def meshgrid_tensor(*sizes, normalize=True, device='cuda:0'):
if not normalize:
aranges = [torch.arange(cur_size, device=device) for cur_size in sizes]
grids = torch.meshgrid(aranges)
grid = torch.stack(grids, dim=-1)
else:
aranges = [torch.arange(cur_size, device=device).float() for
cur_size in sizes]
grids = torch.meshgrid(aranges)
        # numpy's stack has no `dim` kwarg; torch.stack matches the
        # non-normalized branch above
        grid = torch.stack([(cur_grid / float(max(sizes[i] - 1, 1))) for i,
            cur_grid in enumerate(grids)], dim=-1)
return grid
class InvGridSamplerNumerator(nn.Module):
eps = 1e-10
def __init__(self, OH=None, OW=None):
super(InvGridSamplerNumerator, self).__init__()
self.OH = OH
self.OW = OW
def forward(self, x, inv_grid):
eps = InvGridSamplerNumerator.eps
batch_size, n_channels, h, w = x.size(0), x.size(1), x.size(2
) if self.OH is None else self.OH, x.size(3
) if self.OW is None else self.OW
inv_grid = (inv_grid.clone() + 1) / 2.0
inv_grid[..., 0] *= h
inv_grid[..., 1] *= w
inv_grid += 1
inv_grid = torch.stack([inv_grid[..., 0].clamp(0, h + 1 - 2 * eps),
inv_grid[..., 1].clamp(0, w + 1 - 2 * eps)], dim=-1)
inv_grid = inv_grid[:, np.newaxis].repeat(1, n_channels, 1, 1, 1)
A = torch.zeros((batch_size, n_channels, h + 3, w + 3), device=x.device
)
mgrid = meshgrid_tensor(batch_size, n_channels, x.size(2), x.size(3
), normalize=False, device=inv_grid.device)
input_cells = mgrid.view(-1, mgrid.size(4))
input_inds_b, input_inds_ch, _input_inds_i, _input_inds_j = (
input_cells[..., 0], input_cells[..., 1], input_cells[..., 2],
input_cells[..., 3])
output_inds_b = input_inds_b
output_inds_ch = input_inds_ch
output_cells_float = inv_grid.view(-1, inv_grid.size(4))
output_cells_long = output_cells_float.long()
for di in range(0, 2):
output_inds_i = output_cells_long[..., 0] + di
corners_i = output_inds_i.float()
bilinear_weights_i = F.relu(1 - torch.abs(output_cells_float[
..., 0] - corners_i))
for dj in range(0, 2):
output_inds_j = output_cells_long[..., 1] + dj
corners_j = output_inds_j.float()
bilinear_weights = bilinear_weights_i * F.relu(1 - torch.
abs(output_cells_float[..., 1] - corners_j))
add_repeated(A, (output_inds_b, output_inds_ch,
output_inds_i, output_inds_j), x.reshape(-1) *
bilinear_weights)
A = A[..., 1:h + 1, 1:w + 1]
return A
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
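# Hedged sanity check (not from the source repo): add_repeated is a
# scatter-add over multi-dimensional indices, accumulating duplicates, and
# matches index_put_ with accumulate=True.
import torch
_t1, _t2 = torch.zeros(3, 4), torch.zeros(3, 4)
_idx = torch.tensor([0, 0, 2]), torch.tensor([1, 1, 3])
_vals = torch.tensor([1.0, 2.0, 5.0])
add_repeated(_t1, _idx, _vals)
_t2.index_put_(_idx, _vals, accumulate=True)
assert torch.equal(_t1, _t2)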
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp7 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 4.0
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 + tmp8
tmp17 = tmp16 * tmp10
tmp18 = tl.where(tmp5, tmp13, tmp17)
tmp19 = tl.where(tmp5, tmp14, tmp18)
tmp20 = tmp19 * tmp12
tmp21 = tl.where(tmp3, tmp20, tmp19)
tmp22 = tmp0 == tmp4
tmp24 = tmp23 + tmp8
tmp25 = tmp24 * tmp10
tmp26 = tl.where(tmp22, tmp13, tmp25)
tmp27 = tl.where(tmp22, tmp14, tmp26)
tmp28 = tl.where(tmp2, tmp20, tmp27)
tmp29 = tl.where(tmp2, tmp21, tmp28)
tmp30 = tmp29 + tmp8
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_view_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_abs_add_index_add_mul_relu_rsub_sub_view_2(
in_ptr0, in_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp100 = tl.load(in_ptr1 + x0, xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp0 < tmp2
tmp4 = tl.load(in_ptr0 + (4 * (x0 % 16) + 64 * (x0 // 64)), tmp3 &
xmask, eviction_policy='evict_last', other=0.0)
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 4.9999999998
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tmp0 >= tmp2
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + (1 + 4 * (x0 % 16) + 64 * (x0 // 64)), tmp11 &
xmask, eviction_policy='evict_last', other=0.0)
tmp15 = triton_helpers.maximum(tmp14, tmp5)
tmp16 = triton_helpers.minimum(tmp15, tmp7)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp3, tmp10, tmp18)
tmp20 = tmp19.to(tl.int64)
tmp21 = tmp20 + tmp0
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp19 - tmp22
tmp24 = tl_math.abs(tmp23)
tmp25 = 1.0
tmp26 = tmp25 - tmp24
tmp27 = tl.full([1], 0, tl.int32)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = tmp2 < tmp2
tmp31 = tl.load(in_ptr0 + (4 * (x0 % 16) + 64 * (x0 // 64)), tmp30 &
xmask, eviction_policy='evict_last', other=0.0)
tmp32 = triton_helpers.maximum(tmp31, tmp5)
tmp33 = triton_helpers.minimum(tmp32, tmp7)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tmp2 >= tmp2
tmp37 = tmp2 < tmp12
tmp38 = tl.load(in_ptr0 + (1 + 4 * (x0 % 16) + 64 * (x0 // 64)), tmp36 &
xmask, eviction_policy='evict_last', other=0.0)
tmp39 = triton_helpers.maximum(tmp38, tmp5)
tmp40 = triton_helpers.minimum(tmp39, tmp7)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp36, tmp40, tmp41)
tmp43 = tl.where(tmp30, tmp35, tmp42)
tmp44 = tmp43.to(tl.int64)
tmp45 = tmp44 + tmp0
tmp46 = tmp45.to(tl.float32)
tmp47 = tmp43 - tmp46
tmp48 = tl_math.abs(tmp47)
tmp49 = tmp25 - tmp48
tmp50 = triton_helpers.maximum(tmp27, tmp49)
tmp51 = tmp28 * tmp50
tmp52 = x0 // 64
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp3, tmp52, tmp53)
tmp55 = tmp11 & tmp13
tmp56 = x0 // 16 % 4
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp55, tmp56, tmp57)
tmp59 = tmp0 >= tmp12
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp0 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = x0 // 4 % 4
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp62, tmp63, tmp64)
tmp66 = tmp0 >= tmp60
tmp67 = tl.full([1], 4, tl.int64)
tmp69 = x0 % 4
tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
tmp71 = tl.where(tmp66, tmp69, tmp70)
tmp72 = tl.where(tmp62, tmp65, tmp71)
tmp73 = tl.where(tmp55, tmp58, tmp72)
tmp74 = tl.where(tmp3, tmp54, tmp73)
tmp75 = tmp74 * tmp67
tmp76 = tl.where(tmp30, tmp52, tmp53)
tmp77 = tmp36 & tmp37
tmp78 = tl.where(tmp77, tmp56, tmp57)
tmp79 = tmp2 >= tmp12
tmp80 = tmp2 < tmp60
tmp81 = tmp79 & tmp80
tmp82 = tl.where(tmp81, tmp63, tmp64)
tmp83 = tmp2 >= tmp60
tmp85 = tl.where(tmp83, tmp69, tmp70)
tmp86 = tl.where(tmp81, tmp82, tmp85)
tmp87 = tl.where(tmp77, tmp78, tmp86)
tmp88 = tl.where(tmp30, tmp76, tmp87)
tmp89 = tmp75 + tmp88
tmp90 = tl.full([1], 7, tl.int64)
tmp91 = tmp89 * tmp90
tmp92 = tmp91 + tmp21
tmp93 = tmp92 * tmp90
tmp94 = tmp93 + tmp45
tmp95 = tl.full([XBLOCK], 784, tl.int32)
tmp96 = tmp94 + tmp95
tmp97 = tmp94 < 0
tmp98 = tl.where(tmp97, tmp96, tmp94)
tl.device_assert((0 <= tmp98) & (tmp98 < 784) | ~xmask,
'index out of bounds: 0 <= tmp98 < 784')
tmp101 = tmp100 * tmp51
tmp102 = tmp44 + tmp2
tmp103 = tmp102.to(tl.float32)
tmp104 = tmp43 - tmp103
tmp105 = tl_math.abs(tmp104)
tmp106 = tmp25 - tmp105
tmp107 = triton_helpers.maximum(tmp27, tmp106)
tmp108 = tmp28 * tmp107
tmp109 = tmp93 + tmp102
tmp110 = tmp109 + tmp95
tmp111 = tmp109 < 0
tmp112 = tl.where(tmp111, tmp110, tmp109)
tl.device_assert((0 <= tmp112) & (tmp112 < 784) | ~xmask,
'index out of bounds: 0 <= tmp112 < 784')
tmp114 = tmp100 * tmp108
tmp115 = tmp20 + tmp2
tmp116 = tmp115.to(tl.float32)
tmp117 = tmp19 - tmp116
tmp118 = tl_math.abs(tmp117)
tmp119 = tmp25 - tmp118
tmp120 = triton_helpers.maximum(tmp27, tmp119)
tmp121 = tmp120 * tmp50
tmp122 = tmp91 + tmp115
tmp123 = tmp122 * tmp90
tmp124 = tmp123 + tmp45
tmp125 = tmp124 + tmp95
tmp126 = tmp124 < 0
tmp127 = tl.where(tmp126, tmp125, tmp124)
tl.device_assert((0 <= tmp127) & (tmp127 < 784) | ~xmask,
'index out of bounds: 0 <= tmp127 < 784')
tmp129 = tmp100 * tmp121
tmp130 = tmp120 * tmp107
tmp131 = tmp123 + tmp102
tmp132 = tmp131 + tmp95
tmp133 = tmp131 < 0
tmp134 = tl.where(tmp133, tmp132, tmp131)
tl.device_assert((0 <= tmp134) & (tmp134 < 784) | ~xmask,
'index out of bounds: 0 <= tmp134 < 784')
tmp136 = tmp100 * tmp130
tl.atomic_add(out_ptr2 + tl.broadcast_to(tmp98, [XBLOCK]), tmp101,
xmask, sem='relaxed')
tl.atomic_add(out_ptr2 + tl.broadcast_to(tmp112, [XBLOCK]), tmp114,
xmask, sem='relaxed')
tl.atomic_add(out_ptr2 + tl.broadcast_to(tmp127, [XBLOCK]), tmp129,
xmask, sem='relaxed')
tl.atomic_add(out_ptr2 + tl.broadcast_to(tmp134, [XBLOCK]), tmp136,
xmask, sem='relaxed')
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_0[grid(256)](arg1_1, buf2, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg1_1
buf5 = empty_strided_cuda((784,), (1,), torch.float32)
triton_poi_fused_view_1[grid(784)](buf5, 784, XBLOCK=256, num_warps
=4, num_stages=1)
triton_poi_fused__to_copy_abs_add_index_add_mul_relu_rsub_sub_view_2[
grid(256)](buf2, arg0_1, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del arg0_1
del buf2
return reinterpret_tensor(buf5, (4, 4, 4, 4), (196, 49, 7, 1), 8),
def ravel_multi_index(indices, shape):
indices_ravel = indices[0]
for i in range(1, len(indices)):
indices_ravel = indices_ravel * shape[i] + indices[i]
return indices_ravel
def add_repeated(t, indices, values):
shape = t.shape
t_ravel = t.view(t.numel())
indices_ravel = ravel_multi_index(indices, shape)
t_ravel.index_add_(0, indices_ravel, values)
def meshgrid_tensor(*sizes, normalize=True, device='cuda:0'):
if not normalize:
aranges = [torch.arange(cur_size, device=device) for cur_size in sizes]
grids = torch.meshgrid(aranges)
grid = torch.stack(grids, dim=-1)
else:
aranges = [torch.arange(cur_size, device=device).float() for
cur_size in sizes]
grids = torch.meshgrid(aranges)
        # numpy's stack has no `dim` kwarg; torch.stack matches the
        # non-normalized branch above
        grid = torch.stack([(cur_grid / float(max(sizes[i] - 1, 1))) for i,
            cur_grid in enumerate(grids)], dim=-1)
return grid
class InvGridSamplerNumeratorNew(nn.Module):
eps = 1e-10
def __init__(self, OH=None, OW=None):
super(InvGridSamplerNumeratorNew, self).__init__()
self.OH = OH
self.OW = OW
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EasyTry/coordinate_based_inpainting
|
InvGridSamplerNumerator
| false
| 8,054
|
[
"MIT"
] | 13
|
cbe0e3a58c8cb2054f0536a56f57264fd9967d63
|
https://github.com/EasyTry/coordinate_based_inpainting/tree/cbe0e3a58c8cb2054f0536a56f57264fd9967d63
|
TVLoss
|
import torch
import torch.nn as nn
class TVLoss(nn.Module):
def __init__(self, tv_loss_weight=1):
"""
Total variation loss
https://github.com/jxgu1016/Total_Variation_Loss.pytorch
Args:
            tv_loss_weight (int): scale applied to the total-variation term.
"""
super(TVLoss, self).__init__()
self.tv_loss_weight = tv_loss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self.tensor_size(x[:, :, 1:, :])
count_w = self.tensor_size(x[:, :, :, 1:])
h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w
) / batch_size
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
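# Hedged sanity check (not from the source repo): the total variation of a
# constant image is zero, and the loss is linear in tv_loss_weight.
import torch
assert TVLoss()(torch.full((2, 3, 8, 8), 0.5)).item() == 0.0
_x = torch.rand(2, 3, 8, 8)
assert torch.allclose(TVLoss(tv_loss_weight=3)(_x), 3 * TVLoss()(_x))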
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def __init__(self, tv_loss_weight=1):
"""
Total variation loss
https://github.com/jxgu1016/Total_Variation_Loss.pytorch
Args:
            tv_loss_weight (int): scale applied to the total-variation term.
"""
super(TVLossNew, self).__init__()
self.tv_loss_weight = tv_loss_weight
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
EKami/EzeeML
|
TVLoss
| false
| 8,055
|
[
"MIT"
] | 35
|
21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
https://github.com/EKami/EzeeML/tree/21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
GlobalMaxPool
|
import torch
from torch import nn
class GlobalMaxPool(nn.Module):
"""
Max pooling in an equivariant network
"""
def __init__(self):
"""
"""
super().__init__()
def forward(self, x):
"""
"""
mx = torch.max(torch.max(x, dim=-1, keepdim=True)[0], dim=-2,
keepdim=True)[0]
return mx
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
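# Hedged sanity check (not from the source repo): the nested torch.max calls
# reduce the last two (spatial) dims, i.e. amax over dim=(-2, -1) with
# keepdim=True.
import torch
_x = torch.rand(4, 4, 4, 4)
_mx = GlobalMaxPool()(_x)
assert _mx.shape == (4, 4, 1, 1)
assert torch.equal(_mx, _x.amax(dim=(-2, -1), keepdim=True))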
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp6, tmp13)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp14, tmp21)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp22, tmp29)
tl.store(out_ptr0 + x0, tmp30, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class GlobalMaxPoolNew(nn.Module):
"""
Max pooling in an equivariant network
"""
def __init__(self):
"""
"""
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ElisevanderPol/symmetrizer
|
GlobalMaxPool
| false
| 8,056
|
[
"MIT"
] | 16
|
8dae02bee2ba7132ae4fb07e07020767d280842c
|
https://github.com/ElisevanderPol/symmetrizer/tree/8dae02bee2ba7132ae4fb07e07020767d280842c
|
adaILN
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class adaILN(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.9,
using_moving_average=True, using_bn=False):
super(adaILN, self).__init__()
self.eps = eps
self.momentum = momentum
self.using_moving_average = using_moving_average
self.using_bn = using_bn
self.num_features = num_features
if self.using_bn:
self.rho = Parameter(torch.Tensor(1, num_features, 3))
self.rho[:, :, 0].data.fill_(3)
self.rho[:, :, 1].data.fill_(1)
self.rho[:, :, 2].data.fill_(1)
self.register_buffer('running_mean', torch.zeros(1,
num_features, 1, 1))
self.register_buffer('running_var', torch.zeros(1, num_features,
1, 1))
self.running_mean.zero_()
self.running_var.zero_()
else:
self.rho = Parameter(torch.Tensor(1, num_features, 2))
self.rho[:, :, 0].data.fill_(3.2)
self.rho[:, :, 1].data.fill_(1)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True
), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True
), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
softmax = nn.Softmax(2)
rho = softmax(self.rho)
if self.using_bn:
if self.training:
bn_mean, bn_var = torch.mean(input, dim=[0, 2, 3], keepdim=True
), torch.var(input, dim=[0, 2, 3], keepdim=True)
if self.using_moving_average:
self.running_mean.mul_(self.momentum)
self.running_mean.add_((1 - self.momentum) * bn_mean.data)
self.running_var.mul_(self.momentum)
self.running_var.add_((1 - self.momentum) * bn_var.data)
else:
self.running_mean.add_(bn_mean.data)
self.running_var.add_(bn_mean.data ** 2 + bn_var.data)
else:
bn_mean = torch.autograd.Variable(self.running_mean)
bn_var = torch.autograd.Variable(self.running_var)
out_bn = (input - bn_mean) / torch.sqrt(bn_var + self.eps)
rho_0 = rho[:, :, 0]
rho_1 = rho[:, :, 1]
rho_2 = rho[:, :, 2]
rho_0 = rho_0.view(1, self.num_features, 1, 1)
rho_1 = rho_1.view(1, self.num_features, 1, 1)
rho_2 = rho_2.view(1, self.num_features, 1, 1)
rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
rho_2 = rho_2.expand(input.shape[0], -1, -1, -1)
out = rho_0 * out_in + rho_1 * out_ln + rho_2 * out_bn
else:
rho_0 = rho[:, :, 0]
rho_1 = rho[:, :, 1]
rho_0 = rho_0.view(1, self.num_features, 1, 1)
rho_1 = rho_1.view(1, self.num_features, 1, 1)
rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
out = rho_0 * out_in + rho_1 * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2
).unsqueeze(3)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
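# Hedged sanity check (not from the source repo): with using_bn=False, rho is
# softmaxed over its last dim so the instance-norm and layer-norm branches
# are mixed with per-channel weights summing to 1; gamma and beta come from
# the caller (one pair per sample and channel), and the output keeps the
# input shape.
import torch
_norm = adaILN(num_features=4)
_x = torch.rand(2, 4, 8, 8)
_out = _norm(_x, torch.ones(2, 4), torch.zeros(2, 4))
assert _out.shape == _x.shape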
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + 2 * x2, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 2 * x2), xmask, eviction_policy='evict_last'
)
tmp39 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = tmp26 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp27 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp30 / tmp33
tmp35 = tmp0 - tmp20
tmp36 = tmp35 / tmp25
tmp37 = tmp34 * tmp36
tmp38 = tmp32 / tmp33
tmp40 = tmp0 - tmp39
tmp42 = tmp40 / tmp41
tmp43 = tmp38 * tmp42
tmp44 = tmp37 + tmp43
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp44, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x0 = xindex % 16
x2 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x4, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 2), (8, 2, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf6
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11,
primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_1[grid(16)](buf1,
buf5, primals_1, primals_2, buf7, buf11, buf12, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
buf13 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16,
4, 1), torch.float32)
triton_poi_fused_add_div_mul_sub_2[grid(4096)](buf12, primals_3,
primals_4, buf13, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del buf12
del primals_4
return buf13, primals_1, primals_2, primals_3, buf1, buf5, buf7, buf11
class adaILNNew(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.9,
using_moving_average=True, using_bn=False):
super(adaILNNew, self).__init__()
self.eps = eps
self.momentum = momentum
self.using_moving_average = using_moving_average
self.using_bn = using_bn
self.num_features = num_features
if self.using_bn:
self.rho = Parameter(torch.Tensor(1, num_features, 3))
self.rho[:, :, 0].data.fill_(3)
self.rho[:, :, 1].data.fill_(1)
self.rho[:, :, 2].data.fill_(1)
self.register_buffer('running_mean', torch.zeros(1,
num_features, 1, 1))
self.register_buffer('running_var', torch.zeros(1, num_features,
1, 1))
self.running_mean.zero_()
self.running_var.zero_()
else:
self.rho = Parameter(torch.Tensor(1, num_features, 2))
self.rho[:, :, 0].data.fill_(3.2)
self.rho[:, :, 1].data.fill_(1)
def forward(self, input_0, input_1, input_2):
primals_2 = self.rho
primals_1 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Elvinky/IEGAN
|
adaILN
| false
| 8,057
|
[
"MIT"
] | 29
|
db072e38fb022b367da24d3210c59136fbad224e
|
https://github.com/Elvinky/IEGAN/tree/db072e38fb022b367da24d3210c59136fbad224e
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
"""policy-value network module"""
def __init__(self, board_width, board_height):
super(Net, self).__init__()
self.board_width = board_width
self.board_height = board_height
self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1)
self.act_fc1 = nn.Linear(4 * board_width * board_height,
board_width * board_height)
self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1)
self.val_fc1 = nn.Linear(2 * board_width * board_height, 64)
self.val_fc2 = nn.Linear(64, 1)
def forward(self, state_input):
x = F.relu(self.conv1(state_input))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x_act = F.relu(self.act_conv1(x))
x_act = x_act.view(-1, 4 * self.board_width * self.board_height)
        x_act = F.log_softmax(self.act_fc1(x_act), dim=1)
x_val = F.relu(self.val_conv1(x))
x_val = x_val.view(-1, 2 * self.board_width * self.board_height)
x_val = F.relu(self.val_fc1(x_val))
        x_val = torch.tanh(self.val_fc2(x_val))
return x_act, x_val
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'board_width': 4, 'board_height': 4}]
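# Hedged sanity check (not from the source repo): the policy head returns log
# action probabilities over the board_width * board_height cells, and the
# value head a scalar in [-1, 1].
import torch
_net = Net(board_width=4, board_height=4)
_act, _val = _net(torch.rand(2, 4, 4, 4))
assert _act.shape == (2, 16) and _val.shape == (2, 1)
assert torch.allclose(_act.exp().sum(dim=1), torch.ones(2), atol=1e-05)
assert (_val.abs() <= 1).all()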
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_per_fused__log_softmax_8(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 16 * x0), tmp12, xmask)
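# Editor's sketch (not part of the generated source): the kernel above is the
# standard numerically stable log-softmax over each row of 16 logits,
#   log_softmax(x) = (x - m) - log(sum(exp(x - m))),  m = max(x),
# computed by tmp4 (m), tmp6 (shifted exp), tmp10 (partition sum) and tmp12.
if __name__ == "__main__":
    _x = torch.randn(4, 16)
    _m = _x.max(dim=1, keepdim=True).values
    _ref = (_x - _m) - (_x - _m).exp().sum(dim=1, keepdim=True).log()
    assert torch.allclose(_ref, torch.log_softmax(_x, dim=1), atol=1e-06)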
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 8
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 2 * x2 + 32 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_tanh_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 64), (64, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_13, (2,), (1,))
assert_size_stride(primals_14, (64, 32), (32, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (1, 64), (64, 1))
assert_size_stride(primals_17, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 9)](primals_1, buf0, 128, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_4[grid(2048)](buf5, primals_2,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_5[grid(4096)](buf7, primals_5,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_6[grid(8192)](buf9, primals_7,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4))
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(16, 16)](
buf10, primals_9, buf11, buf23, 16, 16, XBLOCK=16, YBLOCK=16,
num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64),
(64, 1), 0), reinterpret_tensor(primals_10, (64, 16), (1, 64),
0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_per_fused__log_softmax_8[grid(4)](buf12, buf15, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del buf12
buf16 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 2, 4, 4), (32, 1, 8, 2))
buf17 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(8, 16)](
buf16, primals_13, buf17, buf22, 8, 16, XBLOCK=16, YBLOCK=8,
num_warps=4, num_stages=1)
del buf16
del primals_13
buf18 = reinterpret_tensor(buf10, (4, 64), (64, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf17, (4, 32), (32, 1), 0),
reinterpret_tensor(primals_14, (32, 64), (1, 32), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_relu_10[grid(256)](buf19, primals_15, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_15
buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf19, reinterpret_tensor(primals_16, (64, 1), (1,
64), 0), out=buf20)
buf21 = buf20
del buf20
triton_poi_fused_tanh_11[grid(4)](buf21, primals_17, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_17
return (buf15, buf21, buf0, buf1, buf2, buf3, primals_8, primals_12,
buf5, buf7, buf9, reinterpret_tensor(buf11, (4, 64), (64, 1), 0),
buf15, reinterpret_tensor(buf17, (4, 32), (32, 1), 0), buf19, buf21,
primals_16, primals_14, buf22, primals_10, buf23)
class NetNew(nn.Module):
"""policy-value network module"""
def __init__(self, board_width, board_height):
super(NetNew, self).__init__()
self.board_width = board_width
self.board_height = board_height
self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1)
self.act_fc1 = nn.Linear(4 * board_width * board_height,
board_width * board_height)
self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1)
self.val_fc1 = nn.Linear(2 * board_width * board_height, 64)
self.val_fc2 = nn.Linear(64, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.act_conv1.weight
primals_9 = self.act_conv1.bias
primals_10 = self.act_fc1.weight
primals_11 = self.act_fc1.bias
primals_12 = self.val_conv1.weight
primals_13 = self.val_conv1.bias
primals_14 = self.val_fc1.weight
primals_15 = self.val_fc1.bias
primals_16 = self.val_fc2.weight
primals_17 = self.val_fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0], output[1]
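# Editor's sketch: a minimal smoke test for the compiled wrapper above. The
# size guards in call() pin the input to (4, 4, 4, 4), a batch of four
# 4-plane 4x4 boards, so that is the only shape this artifact accepts.
if __name__ == "__main__":
    if torch.cuda.is_available():
        net = NetNew(board_width=4, board_height=4).cuda()
        state = torch.rand(4, 4, 4, 4, device="cuda")
        log_act_probs, value = net(state)
        assert log_act_probs.shape == (4, 16)  # log-softmax over 16 cells
        assert value.shape == (4, 1)  # tanh-squashed scalar value head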
| repo_name: Dryeck/17-18-Reinforcement | module_name: Net | synthetic: false | uuid: 8,058 | licenses: ["MIT"] | stars: 36 | sha: f5a289a96c0139758436ab6a5a589519af1178da | repo_link: https://github.com/Dryeck/17-18-Reinforcement/tree/f5a289a96c0139758436ab6a5a589519af1178da |
Conv1D
|
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
class Conv1D(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
bias=True):
super(Conv1D, self).__init__()
self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)
def forward(self, x):
x = x.transpose(1, 2)
x = self.conv1d(x)
return x.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
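# Editor's sketch: Conv1D expects channel-last input (batch, length, in_dim)
# and transposes around the underlying nn.Conv1d, so the layout is preserved.
if __name__ == "__main__":
    conv = Conv1D(in_dim=4, out_dim=8)
    x = torch.rand(4, 10, 4)  # (batch, seq_len, in_dim)
    y = conv(x)
    assert y.shape == (4, 10, 8)  # kernel_size=1 keeps the length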
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class Conv1DNew(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
bias=True):
super(Conv1DNew, self).__init__()
self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)
def forward(self, input_0):
primals_2 = self.conv1d.weight
primals_3 = self.conv1d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| repo_name: EGO4D/episodic-memory | module_name: Conv1D | synthetic: false | uuid: 8,059 | licenses: ["MIT"] | stars: 27 | sha: 2a3464882cd4f665c358c1b05a6397339e33c2e1 | repo_link: https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1 |
AttBlockV2
|
import torch
import torch.nn as nn
def init_layer(layer):
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.0)
class AttBlockV2(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int', activation=
'linear'):
super().__init__()
self.activation = activation
self.att = nn.Conv1d(in_channels=in_features, out_channels=
out_features, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=in_features, out_channels=
out_features, kernel_size=1, stride=1, padding=0, bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
def forward(self, x):
norm_att = torch.softmax(torch.tanh(self.att(x)), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
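# Editor's sketch: AttBlockV2 is attention pooling over the time axis. The
# weights softmax(tanh(att(x))) sum to 1 per (batch, channel), so the pooled
# output is a convex combination of the class activations cla(x).
if __name__ == "__main__":
    block = AttBlockV2(in_features=4, out_features=4)
    x = torch.rand(2, 4, 7)  # (batch, channels, time)
    pooled, norm_att, cla = block(x)
    assert pooled.shape == (2, 4) and cla.shape == (2, 4, 7)
    assert torch.allclose(norm_att.sum(dim=-1), torch.ones(2, 4), atol=1e-06)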
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = libdevice.tanh(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = libdevice.tanh(tmp7)
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = libdevice.tanh(tmp10)
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tl_math.exp(tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_tanh_1[grid(64)](buf1, buf2, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf4 = extern_kernels.convolution(primals_3, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_0[grid(64)](buf5, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_sum_3[grid(16)](buf3, buf5, buf6, 16, XBLOCK=
16, num_warps=1, num_stages=1)
return buf6, buf3, buf5, primals_1, primals_3, primals_4, buf1, buf3, buf5
def init_layer(layer):
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.0)
class AttBlockV2New(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int', activation=
'linear'):
super().__init__()
self.activation = activation
self.att = nn.Conv1d(in_channels=in_features, out_channels=
out_features, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=in_features, out_channels=
out_features, kernel_size=1, stride=1, padding=0, bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
def forward(self, input_0):
primals_1 = self.att.weight
primals_2 = self.att.bias
primals_4 = self.cla.weight
primals_5 = self.cla.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| repo_name: EMUNES/Auto-Subtitle-File-Generation | module_name: AttBlockV2 | synthetic: false | uuid: 8,060 | licenses: ["Apache-2.0"] | stars: 33 | sha: 535a6351f450b1970da50bbbf4cc6d2f442ec335 | repo_link: https://github.com/EMUNES/Auto-Subtitle-File-Generation/tree/535a6351f450b1970da50bbbf4cc6d2f442ec335 |
SigmoidFocalLoss
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class SigmoidFocalLoss(nn.Module):
def __init__(self, alpha: 'float'=-1, gamma: 'float'=2, reduction:
'str'='none'):
super(SigmoidFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
def forward(self, input, target):
p = torch.sigmoid(input)
ce_loss = F.binary_cross_entropy_with_logits(input, target,
reduction='none')
p_t = p * target + (1 - p) * (1 - target)
loss = ce_loss * (1 - p_t) ** self.gamma
if self.alpha >= 0:
alpha_t = self.alpha * target + (1 - self.alpha) * (1 - target)
loss = alpha_t * loss
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
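# Editor's sketch: as I read it, the fused kernel further down specializes the
# default configuration (alpha=-1, gamma=2, reduction='none'), i.e. it emits
# the full elementwise map ce * (1 - p_t) ** 2. The other branches still run
# in eager mode:
if __name__ == "__main__":
    logits, targets = torch.randn(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    assert SigmoidFocalLoss()(logits, targets).shape == logits.shape
    mean_loss = SigmoidFocalLoss(alpha=0.25, reduction='mean')(logits, targets)
    assert mean_loss.ndim == 0  # scalar after 'mean' reduction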
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.sigmoid(tmp3)
tmp14 = tmp13 * tmp0
tmp15 = tmp1 - tmp13
tmp16 = tmp15 * tmp2
tmp17 = tmp14 + tmp16
tmp18 = tmp1 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tmp12 * tmp19
tl.store(out_ptr0 + x0, tmp20, xmask)
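# Editor's sketch (not part of the generated source): tmp12 above is the
# numerically stable binary cross-entropy with logits. For logits x and
# targets t the kernel uses
#   bce(x, t) = (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
# which is algebraically the textbook stable form
#   max(x, 0) - t * x + log1p(exp(-|x|)).
if __name__ == "__main__":
    import torch.nn.functional as F
    _x, _t = torch.randn(1000), torch.rand(1000)
    _kernel = (1 - _t) * _x - (torch.minimum(torch.zeros_like(_x), _x) -
                               torch.log1p(torch.exp(-_x.abs())))
    _ref = F.binary_cross_entropy_with_logits(_x, _t, reduction='none')
    assert torch.allclose(_kernel, _ref, atol=1e-06)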
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0[
grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SigmoidFocalLossNew(nn.Module):
def __init__(self, alpha: 'float'=-1, gamma: 'float'=2, reduction:
'str'='none'):
super(SigmoidFocalLossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| repo_name: EryiXie/PlaneRecNet | module_name: SigmoidFocalLoss | synthetic: false | uuid: 8,061 | licenses: ["MIT"] | stars: 34 | sha: 534e23e6c5db2235ab1e5a9419fb4bfec3ffa943 | repo_link: https://github.com/EryiXie/PlaneRecNet/tree/534e23e6c5db2235ab1e5a9419fb4bfec3ffa943 |
eSEModule
|
import torch
from torch import nn
import torch.nn.functional as F
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class eSEModule(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, x):
input = x
x = self.avg_pool(x)
x = self.fc(x)
x = self.hsigmoid(x)
return input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
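# Editor's sketch: eSE ("effective squeeze-excitation") keeps the channel
# count constant (note that `reduction` is accepted but unused) and rescales
# each channel by hsigmoid(fc(avg_pool(x))), a gate in [0, 1] broadcast over
# the spatial dims.
if __name__ == "__main__":
    se = eSEModule(channel=4)
    x = torch.rand(2, 4, 8, 8)
    assert se(x).shape == x.shape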
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_1[grid(256)](
primals_1, buf2, primals_3, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_2[grid(16)](buf2,
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del primals_3
return buf3, primals_1, primals_2, buf1, buf4
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class eSEModuleNew(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| repo_name: EricFH/SOR | module_name: eSEModule | synthetic: false | uuid: 8,062 | licenses: ["Apache-2.0"] | stars: 14 | sha: d644469da16169dd269c6ecaac51b1762649e17a | repo_link: https://github.com/EricFH/SOR/tree/d644469da16169dd269c6ecaac51b1762649e17a |
ILN
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class ILN(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.9,
using_moving_average=True, using_bn=False):
super(ILN, self).__init__()
self.eps = eps
self.momentum = momentum
self.using_moving_average = using_moving_average
self.using_bn = using_bn
self.num_features = num_features
if self.using_bn:
self.rho = Parameter(torch.Tensor(1, num_features, 3))
self.rho[:, :, 0].data.fill_(1)
self.rho[:, :, 1].data.fill_(3)
self.rho[:, :, 2].data.fill_(3)
self.register_buffer('running_mean', torch.zeros(1,
num_features, 1, 1))
self.register_buffer('running_var', torch.zeros(1, num_features,
1, 1))
self.running_mean.zero_()
self.running_var.zero_()
else:
self.rho = Parameter(torch.Tensor(1, num_features, 2))
self.rho[:, :, 0].data.fill_(1)
self.rho[:, :, 1].data.fill_(3.2)
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True
), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True
), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
softmax = nn.Softmax(2)
rho = softmax(self.rho)
if self.using_bn:
if self.training:
bn_mean, bn_var = torch.mean(input, dim=[0, 2, 3], keepdim=True
), torch.var(input, dim=[0, 2, 3], keepdim=True)
if self.using_moving_average:
self.running_mean.mul_(self.momentum)
self.running_mean.add_((1 - self.momentum) * bn_mean.data)
self.running_var.mul_(self.momentum)
self.running_var.add_((1 - self.momentum) * bn_var.data)
else:
self.running_mean.add_(bn_mean.data)
self.running_var.add_(bn_mean.data ** 2 + bn_var.data)
else:
bn_mean = torch.autograd.Variable(self.running_mean)
bn_var = torch.autograd.Variable(self.running_var)
out_bn = (input - bn_mean) / torch.sqrt(bn_var + self.eps)
rho_0 = rho[:, :, 0]
rho_1 = rho[:, :, 1]
rho_2 = rho[:, :, 2]
rho_0 = rho_0.view(1, self.num_features, 1, 1)
rho_1 = rho_1.view(1, self.num_features, 1, 1)
rho_2 = rho_2.view(1, self.num_features, 1, 1)
rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
rho_2 = rho_2.expand(input.shape[0], -1, -1, -1)
out = rho_0 * out_in + rho_1 * out_ln + rho_2 * out_bn
else:
rho_0 = rho[:, :, 0]
rho_1 = rho[:, :, 1]
rho_0 = rho_0.view(1, self.num_features, 1, 1)
rho_1 = rho_1.view(1, self.num_features, 1, 1)
rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
out = rho_0 * out_in + rho_1 * out_ln
out = out * self.gamma.expand(input.shape[0], -1, -1, -1
) + self.beta.expand(input.shape[0], -1, -1, -1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
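# Editor's sketch: with using_bn=False, ILN blends instance-norm and
# layer-norm outputs with per-channel mixing weights softmax(rho), then
# applies the usual affine transform. The two mixing weights sum to 1.
if __name__ == "__main__":
    iln = ILN(num_features=4)
    x = torch.rand(4, 4, 4, 4)
    assert iln(x).shape == x.shape
    rho = torch.softmax(iln.rho, dim=2)  # (1, num_features, 2)
    assert torch.allclose(rho.sum(dim=2), torch.ones(1, 4), atol=1e-06)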
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + 2 * x2, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 2 * x2), xmask, eviction_policy='evict_last'
)
tmp39 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = tmp26 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp27 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp30 / tmp33
tmp35 = tmp0 - tmp20
tmp36 = tmp35 / tmp25
tmp37 = tmp34 * tmp36
tmp38 = tmp32 / tmp33
tmp40 = tmp0 - tmp39
tmp42 = tmp40 / tmp41
tmp43 = tmp38 * tmp42
tmp44 = tmp37 + tmp43
tmp46 = tmp44 * tmp45
tmp48 = tmp46 + tmp47
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp48, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 2), (8, 2, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf6
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11,
primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_1[grid(16)](buf1,
buf5, primals_1, primals_2, buf7, buf11, primals_3, primals_4,
buf12, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_4
return buf12, primals_1, primals_2, primals_3, buf1, buf5, buf7, buf11
class ILNNew(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.9,
using_moving_average=True, using_bn=False):
super(ILNNew, self).__init__()
self.eps = eps
self.momentum = momentum
self.using_moving_average = using_moving_average
self.using_bn = using_bn
self.num_features = num_features
if self.using_bn:
self.rho = Parameter(torch.Tensor(1, num_features, 3))
self.rho[:, :, 0].data.fill_(1)
self.rho[:, :, 1].data.fill_(3)
self.rho[:, :, 2].data.fill_(3)
self.register_buffer('running_mean', torch.zeros(1,
num_features, 1, 1))
self.register_buffer('running_var', torch.zeros(1, num_features,
1, 1))
self.running_mean.zero_()
self.running_var.zero_()
else:
self.rho = Parameter(torch.Tensor(1, num_features, 2))
self.rho[:, :, 0].data.fill_(1)
self.rho[:, :, 1].data.fill_(3.2)
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input_0):
primals_2 = self.rho
primals_3 = self.gamma
primals_4 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| repo_name: Elvinky/IEGAN | module_name: ILN | synthetic: false | uuid: 8,063 | licenses: ["MIT"] | stars: 29 | sha: db072e38fb022b367da24d3210c59136fbad224e | repo_link: https://github.com/Elvinky/IEGAN/tree/db072e38fb022b367da24d3210c59136fbad224e |
Message_Passing_Unit_v2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Message_Passing_Unit_v2(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v2, self).__init__()
self.w = nn.Linear(fea_size, filter_size, bias=True)
self.fea_size = fea_size
self.filter_size = filter_size
def forward(self, unary_term, pair_term):
if unary_term.size()[0] == 1 and pair_term.size()[0] > 1:
unary_term = unary_term.expand(pair_term.size()[0], unary_term.
size()[1])
if unary_term.size()[0] > 1 and pair_term.size()[0] == 1:
pair_term = pair_term.expand(unary_term.size()[0], pair_term.
size()[1])
gate = self.w(F.relu(unary_term)) * self.w(F.relu(pair_term))
        gate = torch.sigmoid(gate.sum(1))
output = pair_term * gate.expand(gate.size()[0], pair_term.size()[1])
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'fea_size': 4}]
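# Editor's sketch: the gate is one scalar per row, the sigmoid of the dot
# product of the shared projections w(relu(unary)) and w(relu(pair)). Note
# that `gate.expand(N, fea_size)` broadcasts the 1-D gate along rows, so in
# general it only type-checks when the batch size equals fea_size (as in
# get_inputs above); a strictly per-row gate would normally be written
# gate.unsqueeze(1). The compiled kernel triton_poi_fused_mul_2 mirrors this,
# indexing the gate by column (x0 = xindex % 4).
if __name__ == "__main__":
    mpu = Message_Passing_Unit_v2(fea_size=4)
    out = mpu(torch.rand(4, 4), torch.rand(4, 4))
    assert out.shape == (4, 4)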
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 128 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.sigmoid(tmp6)
tmp8 = 1.0
tmp9 = tmp8 - tmp7
tmp10 = tmp7 * tmp9
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (128, 4), (4, 1))
assert_size_stride(primals_4, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(4, 128), (1, 4), 0), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_relu_0[grid(16)](primals_2, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3,
(4, 128), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_3
del primals_4
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1[grid(4)](buf1,
buf3, buf4, buf6, 4, 128, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_2[grid(16)](primals_2, buf4, buf5, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf4
return buf5, primals_2, buf0, buf1, buf2, buf3, buf6
class Message_Passing_Unit_v2New(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v2New, self).__init__()
self.w = nn.Linear(fea_size, filter_size, bias=True)
self.fea_size = fea_size
self.filter_size = filter_size
def forward(self, input_0, input_1):
primals_3 = self.w.weight
primals_4 = self.w.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| repo_name: EricssonResearch/scott-eu | module_name: Message_Passing_Unit_v2 | synthetic: false | uuid: 8,064 | licenses: ["Apache-2.0"] | stars: 19 | sha: aad7fd2f767a3c5e7d89223a593fd979ad596db3 | repo_link: https://github.com/EricssonResearch/scott-eu/tree/aad7fd2f767a3c5e7d89223a593fd979ad596db3 |
Gated_Recurrent_Unit
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Gated_Recurrent_Unit(nn.Module):
def __init__(self, fea_size, dropout):
super(Gated_Recurrent_Unit, self).__init__()
self.wih = nn.Linear(fea_size, fea_size, bias=True)
self.whh = nn.Linear(fea_size, fea_size, bias=True)
self.dropout = dropout
def forward(self, input, hidden):
output = self.wih(F.relu(input)) + self.whh(F.relu(hidden))
if self.dropout:
output = F.dropout(output, training=self.training)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fea_size': 4, 'dropout': 0.5}]
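# Editor's sketch: despite the name there are no reset/update gates here; the
# update is a single additive map
#   output = wih(relu(input)) + whh(relu(hidden))
# with optional dropout gated by the module's training flag.
if __name__ == "__main__":
    gru = Gated_Recurrent_Unit(fea_size=4, dropout=0.5)
    gru.eval()  # disable dropout for a deterministic check
    x, h = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    ref = gru.wih(torch.relu(x)) + gru.whh(torch.relu(h))
    assert torch.allclose(gru(x, h), ref)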
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_relu_0[grid(256)](primals_4, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf4, primals_3, buf3, primals_6,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_3
del primals_6
return buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
class Gated_Recurrent_UnitNew(nn.Module):
def __init__(self, fea_size, dropout):
super(Gated_Recurrent_UnitNew, self).__init__()
self.wih = nn.Linear(fea_size, fea_size, bias=True)
self.whh = nn.Linear(fea_size, fea_size, bias=True)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_2 = self.wih.weight
primals_3 = self.wih.bias
primals_5 = self.whh.weight
primals_6 = self.whh.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| repo_name: EricssonResearch/scott-eu | module_name: Gated_Recurrent_Unit | synthetic: false | uuid: 8,065 | licenses: ["Apache-2.0"] | stars: 19 | sha: aad7fd2f767a3c5e7d89223a593fd979ad596db3 | repo_link: https://github.com/EricssonResearch/scott-eu/tree/aad7fd2f767a3c5e7d89223a593fd979ad596db3 |
WeightedPool
|
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
def mask_logits(inputs, mask, mask_value=-1e+30):
mask = mask.type(torch.float32)
return inputs + (1.0 - mask) * mask_value
class WeightedPool(nn.Module):
def __init__(self, dim):
super(WeightedPool, self).__init__()
weight = torch.empty(dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def forward(self, x, mask):
alpha = torch.tensordot(x, self.weight, dims=1)
alpha = mask_logits(alpha, mask=mask.unsqueeze(2))
alphas = nn.Softmax(dim=1)(alpha)
pooled_x = torch.matmul(x.transpose(1, 2), alphas)
pooled_x = pooled_x.squeeze(2)
return pooled_x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
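# Editor's sketch: masked attention pooling in the layout the module is
# written for, x: (batch, length, dim) and mask: (batch, length) with
# 1 = keep and 0 = padding (pushed to ~0 weight via the -1e30 mask value).
if __name__ == "__main__":
    pool = WeightedPool(dim=4)
    x = torch.rand(2, 5, 4)
    mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
    assert pool(x, mask).shape == (2, 4)  # one dim-4 vector per sequence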
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4 % 16
x3 = xindex // 64
x5 = xindex % 16
x6 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (16 + x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (32 + x4), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (48 + x4), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -1e+30
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp9 = tmp2 - tmp8
tmp10 = tmp9 * tmp4
tmp11 = tmp7 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp15 = tmp2 - tmp14
tmp16 = tmp15 * tmp4
tmp17 = tmp13 + tmp16
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp21 = tmp2 - tmp20
tmp22 = tmp21 * tmp4
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tl.store(out_ptr0 + x6, tmp24, xmask)
tl.store(out_ptr1 + x6, tmp35, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex // 4 % 64
x6 = xindex % 16
x7 = xindex // 64
x4 = xindex // 256
x8 = xindex % 64
x9 = xindex
tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x6 + 16 * x7), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr2 + (x8 + 64 * x4), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + (x8 + 64 * x4), xmask, eviction_policy=
'evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -1e+30
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp8 = tmp6 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tl.store(out_ptr0 + x9, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1), (1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1),
torch.float32)
buf2 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_mul_rsub_0[grid(256)](buf0, primals_3,
buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(1024)](primals_2, buf3, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused__softmax_add_mul_rsub_2[grid(1024)](buf0,
primals_3, buf1, buf2, buf4, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (64, 4, 4), (16, 4, 1), 0), out=buf5)
del buf4
return reinterpret_tensor(buf5, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), primals_3, buf0, reinterpret_tensor(buf3, (64, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
def mask_logits(inputs, mask, mask_value=-1e+30):
mask = mask.type(torch.float32)
return inputs + (1.0 - mask) * mask_value
class WeightedPoolNew(nn.Module):
def __init__(self, dim):
super(WeightedPoolNew, self).__init__()
weight = torch.empty(dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| repo_name: EGO4D/episodic-memory | module_name: WeightedPool | synthetic: false | uuid: 8,066 | licenses: ["MIT"] | stars: 27 | sha: 2a3464882cd4f665c358c1b05a6397339e33c2e1 | repo_link: https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1 |
Noise_injector
|
import torch
import torch.nn as nn
def truncated_normal_(tensor, mean=0, std=1):
size = tensor.shape
tmp = tensor.new_empty(size + (4,)).normal_()
valid = (tmp < 2) & (tmp > -2)
ind = valid.max(-1, keepdim=True)[1]
tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
tensor.data.mul_(std).add_(mean)
def init_weights_orthogonal_normal(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.orthogonal_(m.weight)
truncated_normal_(m.bias, mean=0, std=0.001)
def weights_init(m):
classname = m.__class__.__name__
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight.data)
m.bias.data.fill_(0)
class Noise_injector(nn.Module):
def __init__(self, n_hidden, z_dim, num_channels, n_channels_out,
device='cpu'):
super(Noise_injector, self).__init__()
self.num_channels = num_channels
self.n_channels_out = n_channels_out
self.n_hidden = n_hidden
self.z_dim = z_dim
self.device = device
self.residual = nn.Linear(self.z_dim, self.n_hidden)
self.scale = nn.Linear(self.z_dim, self.n_hidden)
self.last_layer = nn.Conv2d(self.n_hidden, self.n_channels_out,
kernel_size=1)
self.residual.apply(weights_init)
self.scale.apply(weights_init)
self.last_layer.apply(init_weights_orthogonal_normal)
def forward(self, feature_map, z):
residual = self.residual(z).view(z.shape[0], self.n_hidden, 1, 1)
scale = self.scale(z).view(z.shape[0], self.n_hidden, 1, 1)
feature_map = (feature_map + residual) * (scale + 1e-05)
return self.last_layer(feature_map)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_hidden': 4, 'z_dim': 4, 'num_channels': 4,
'n_channels_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp2 * tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_3, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_6, buf0, buf1, buf2,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_8, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
return buf4, primals_3, primals_6, primals_7, buf0, buf1, buf2
def truncated_normal_(tensor, mean=0, std=1):
size = tensor.shape
tmp = tensor.new_empty(size + (4,)).normal_()
valid = (tmp < 2) & (tmp > -2)
ind = valid.max(-1, keepdim=True)[1]
tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
tensor.data.mul_(std).add_(mean)
def init_weights_orthogonal_normal(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.orthogonal_(m.weight)
truncated_normal_(m.bias, mean=0, std=0.001)
def weights_init(m):
classname = m.__class__.__name__
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight.data)
m.bias.data.fill_(0)
class Noise_injectorNew(nn.Module):
def __init__(self, n_hidden, z_dim, num_channels, n_channels_out,
device='cpu'):
super(Noise_injectorNew, self).__init__()
self.num_channels = num_channels
self.n_channels_out = n_channels_out
self.n_hidden = n_hidden
self.z_dim = z_dim
self.device = device
self.residual = nn.Linear(self.z_dim, self.n_hidden)
self.scale = nn.Linear(self.z_dim, self.n_hidden)
self.last_layer = nn.Conv2d(self.n_hidden, self.n_channels_out,
kernel_size=1)
self.residual.apply(weights_init)
self.scale.apply(weights_init)
self.last_layer.apply(init_weights_orthogonal_normal)
def forward(self, input_0, input_1):
primals_1 = self.residual.weight
primals_2 = self.residual.bias
        primals_4 = self.scale.weight
        primals_5 = self.scale.bias
        primals_7 = self.last_layer.weight
        primals_8 = self.last_layer.bias
        # call() feeds primals_3 to both addmm calls as the 2D input z and
        # broadcasts primals_6 as the feature map, so the inputs map to those
        # slots and the scale weight to primals_4.
        primals_6 = input_0
        primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
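# Hedged equivalence probe (assumes CUDA, and that the eager Noise_injector
# class defined earlier in this record is in scope): copy the eager module's
# parameters into the compiled wrapper and compare; with the primals mapping
# above, the max absolute difference is expected to be ~0.
eager = Noise_injector(n_hidden=4, z_dim=4, num_channels=4, n_channels_out=4).cuda()
fused = Noise_injectorNew(n_hidden=4, z_dim=4, num_channels=4, n_channels_out=4).cuda()
fused.load_state_dict(eager.state_dict())
feature_map, z = torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda')
with torch.no_grad():
    print((eager(feature_map, z) - fused(feature_map, z)).abs().max())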
|
EliasKassapis/CAR
|
Noise_injector
| false
| 8,067
|
[
"Apache-2.0"
] | 17
|
ff7ec86aab68c4b9ff8aea171244991bd132d487
|
https://github.com/EliasKassapis/CAR/tree/ff7ec86aab68c4b9ff8aea171244991bd132d487
|
PatchEmbed
|
import torch
from torch import nn
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, x):
_B, _C, _H, _W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 2304
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 12288 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (768, 3, 16, 16), (768, 256, 16, 1))
assert_size_stride(primals_3, (768,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch.
float32)
triton_poi_fused_1[grid(2304, 256)](primals_2, buf1, 2304, 256,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf0, buf1, stride=(16, 16),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_2[grid(3072, 16)](buf2, primals_3,
buf3, 3072, 16, XBLOCK=16, YBLOCK=32, num_warps=4, num_stages=1)
del buf2
del primals_3
return reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0
), buf0, buf1
class PatchEmbedNew(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, input_0):
primals_2 = self.proj.weight
primals_3 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
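# Hedged smoke test (CUDA assumed): for a 64x64 input with 16x16 patches the
# projection yields a 4x4 grid, i.e. 16 tokens of width 768.
patcher = PatchEmbedNew(img_size=64, patch_size=16).cuda()
tokens = patcher(torch.rand(4, 3, 64, 64, device='cuda'))
print(tokens.shape)  # torch.Size([4, 16, 768]) -> (batch, num_patches, embed_dim)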
|
EscVM/EscVM_YT
|
PatchEmbed
| false
| 8,068
|
[
"Apache-2.0"
] | 19
|
0ff1c47d7604d2452d7c9c2edd9b2db66781670a
|
https://github.com/EscVM/EscVM_YT/tree/0ff1c47d7604d2452d7c9c2edd9b2db66781670a
|
Net
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 150)
self.fc2 = nn.Linear(150, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.adaptive_max_pool2d(self.conv2_drop(self.conv2(x)), 4))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x3 = xindex // 30
x2 = xindex // 9000
x4 = xindex % 9000
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 9088 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 54080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 150
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_5(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (150, 320), (320, 1))
assert_size_stride(primals_7, (150,), (1,))
assert_size_stride(primals_8, (10, 150), (150, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(36000)](buf1,
buf2, buf3, 36000, XBLOCK=512, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 26, 26), (13520, 676, 26, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(54080)](buf5, primals_5, 54080,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = torch.ops.aten.adaptive_max_pool2d.default(buf5, [4, 4])
buf7 = buf6[0]
buf8 = buf6[1]
del buf6
buf9 = buf7
del buf7
buf16 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(1280)](buf9, buf16,
1280, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 150), (150, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (4, 320), (320, 1), 0),
reinterpret_tensor(primals_6, (320, 150), (1, 320), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_4[grid(600)](buf11, primals_7, 600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8,
(150, 10), (1, 150), 0), alpha=1, beta=1, out=buf12)
del primals_9
buf15 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_5[grid(4)](buf12, buf15, 4, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf12
return (buf15, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf8, reinterpret_tensor(buf9, (4, 320), (320, 1), 0), buf11, buf15,
primals_8, primals_6, buf16)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 150)
self.fc2 = nn.Linear(150, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
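# Hedged smoke test (CUDA assumed): the compiled graph ends in log_softmax
# (and traces dropout away, matching inference), so the exponentiated rows
# of the output should each sum to ~1.
net = NetNew().cuda().eval()
logp = net(torch.rand(4, 1, 64, 64, device='cuda'))
print(logp.shape, logp.exp().sum(dim=-1))  # torch.Size([4, 10]), rows ~= 1.0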
|
EKami/EzeeML
|
Net
| false
| 8,069
|
[
"MIT"
] | 35
|
21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
https://github.com/EKami/EzeeML/tree/21753a0ede7cc1dc675a2dcd09b6306cea2cad56
|
ResidualBlock
|
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
from typing import Optional
def conv3x3(in_channels: 'int', out_channels: 'int', stride: 'int'=1
) ->nn.Conv2d:
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=1, bias=False)
class ResidualBlock(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', stride:
'int'=1, downsample: 'Optional[nn.Module]'=None) ->None:
super().__init__()
self.conv1 = conv3x3(in_channels, out_channels, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(out_channels, out_channels)
self.downsample = downsample
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf3,
primals_1, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1, buf4
def conv3x3(in_channels: 'int', out_channels: 'int', stride: 'int'=1
) ->nn.Conv2d:
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=1, bias=False)
class ResidualBlockNew(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', stride:
'int'=1, downsample: 'Optional[nn.Module]'=None) ->None:
super().__init__()
self.conv1 = conv3x3(in_channels, out_channels, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(out_channels, out_channels)
self.downsample = downsample
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
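# Hedged smoke test (CUDA assumed): this call() hard-codes the identity
# shortcut (downsample=None, equal channels) from get_init_inputs(), and the
# fused add+ReLU guarantees a non-negative output.
block = ResidualBlockNew(in_channels=4, out_channels=4).cuda()
y = block(torch.rand(4, 4, 4, 4, device='cuda'))
print(y.shape, bool((y >= 0).all()))  # torch.Size([4, 4, 4, 4]) True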
|
EGO4D/episodic-memory
|
ResidualBlock
| false
| 8,070
|
[
"MIT"
] | 27
|
2a3464882cd4f665c358c1b05a6397339e33c2e1
|
https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1
|
LearnableSinusoidEncoding
|
import torch
import torch.nn as nn
class LearnableSinusoidEncoding(nn.Module):
"""Layer converts scalar input to Sinusoid Encoding with learnt scaling."""
def __init__(self, dim, max_timescale_init=10000):
"""Initialize layer.
Args:
            dim: Dimensionality of the sinusoid encoding; should be divisible
                by 2.
max_timescale_init: Maximum time scale used during initialization.
"""
super().__init__()
assert dim % 2 == 0
inv_freq = 1.0 / max_timescale_init ** (torch.arange(0, dim, 2).
float() / dim)
self.inv_freq = nn.Parameter(inv_freq, requires_grad=True)
def forward(self, x):
sinusoid_inp = torch.matmul(x[..., None], self.inv_freq[None, :])
emb = torch.stack((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb.view(*emb.shape[:-2], -1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp12 = tl.load(in_ptr0 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (256, 1), (1, 1), 0
), reinterpret_tensor(primals_2, (1, 2), (2, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4, 2, 2), (256, 64, 16, 4, 2, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(1024)](buf0, buf1, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), buf0, reinterpret_tensor(primals_1, (1, 256), (1, 1), 0)
class LearnableSinusoidEncodingNew(nn.Module):
"""Layer converts scalar input to Sinusoid Encoding with learnt scaling."""
def __init__(self, dim, max_timescale_init=10000):
"""Initialize layer.
Args:
            dim: Dimensionality of the sinusoid encoding; should be divisible
                by 2.
max_timescale_init: Maximum time scale used during initialization.
"""
super().__init__()
assert dim % 2 == 0
inv_freq = 1.0 / max_timescale_init ** (torch.arange(0, dim, 2).
float() / dim)
self.inv_freq = nn.Parameter(inv_freq, requires_grad=True)
def forward(self, input_0):
primals_2 = self.inv_freq
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
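# Hedged check (CUDA assumed): the output interleaves (sin, cos) pairs along
# the last axis, so even/odd slices satisfy sin^2 + cos^2 = 1 elementwise.
enc = LearnableSinusoidEncodingNew(dim=4).cuda()
emb = enc(torch.rand(4, 4, 4, 4, device='cuda'))
pythag = emb[..., 0::2] ** 2 + emb[..., 1::2] ** 2
print(emb.shape, torch.allclose(pythag, torch.ones_like(pythag), atol=1e-5))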
|
ExpectationMax/Translational-Equivariant-Performers
|
LearnableSinusoidEncoding
| false
| 8,071
|
[
"MIT"
] | 10
|
c7a55af3b581426512eb4a57d3a13eb20e93fbd6
|
https://github.com/ExpectationMax/Translational-Equivariant-Performers/tree/c7a55af3b581426512eb4a57d3a13eb20e93fbd6
|
NormedLinear
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
import torch.nn.parallel
import torch.optim
import torch.utils.data
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
del buf1
return buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class NormedLinearNew(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinearNew, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, input_0):
        # call()'s first kernel normalizes primals_1 along dim=1 (rows) and the
        # second normalizes primals_2 along dim=0 (columns), mirroring
        # F.normalize(x, dim=1).mm(F.normalize(weight, dim=0)); the input is
        # therefore primals_1 and the weight primals_2.
        primals_1 = input_0
        primals_2 = self.weight
output = call([primals_1, primals_2])
return output[0]
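# Hedged sketch (CUDA assumed): with the mapping above, each output entry is
# the cosine similarity between an input row and a weight column, so all
# values lie in [-1, 1] up to floating-point error.
layer = NormedLinearNew(in_features=4, out_features=4).cuda()
out = layer(torch.rand(4, 4, device='cuda'))
print(out.shape, bool(out.abs().max() <= 1 + 1e-6))  # torch.Size([4, 4]) True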
|
EricZsy/BalancedKnowledgeDistillation
|
NormedLinear
| false
| 8,072
|
[
"MIT"
] | 22
|
88a2de840a3fc6eb2ee881c729f293b8e78714aa
|
https://github.com/EricZsy/BalancedKnowledgeDistillation/tree/88a2de840a3fc6eb2ee881c729f293b8e78714aa
|
CQConcatenate
|
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
def mask_logits(inputs, mask, mask_value=-1e+30):
mask = mask.type(torch.float32)
return inputs + (1.0 - mask) * mask_value
class Conv1D(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
bias=True):
super(Conv1D, self).__init__()
self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)
def forward(self, x):
x = x.transpose(1, 2)
x = self.conv1d(x)
return x.transpose(1, 2)
class WeightedPool(nn.Module):
def __init__(self, dim):
super(WeightedPool, self).__init__()
weight = torch.empty(dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def forward(self, x, mask):
alpha = torch.tensordot(x, self.weight, dims=1)
alpha = mask_logits(alpha, mask=mask.unsqueeze(2))
alphas = nn.Softmax(dim=1)(alpha)
pooled_x = torch.matmul(x.transpose(1, 2), alphas)
pooled_x = pooled_x.squeeze(2)
return pooled_x
class CQConcatenate(nn.Module):
def __init__(self, dim):
super(CQConcatenate, self).__init__()
self.weighted_pool = WeightedPool(dim=dim)
self.conv1d = Conv1D(in_dim=2 * dim, out_dim=dim, kernel_size=1,
stride=1, padding=0, bias=True)
def forward(self, context, query, q_mask):
pooled_query = self.weighted_pool(query, q_mask)
_, c_seq_len, _ = context.shape
pooled_query = pooled_query.unsqueeze(1).repeat(1, c_seq_len, 1)
output = torch.cat([context, pooled_query], dim=2)
output = self.conv1d(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -1e+30
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp9 = tmp2 - tmp8
tmp10 = tmp9 * tmp4
tmp11 = tmp7 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp15 = tmp2 - tmp14
tmp16 = tmp15 * tmp4
tmp17 = tmp13 + tmp16
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp21 = tmp2 - tmp20
tmp22 = tmp21 * tmp4
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tl.store(out_ptr0 + x0, tmp24, xmask)
tl.store(out_ptr1 + x0, tmp35, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -1e+30
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp8 = tmp6 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = xindex // 8
x2 = xindex // 32
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 32
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 8
y1 = yindex // 8
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 1), (1, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 8, 1), (8, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_mul_rsub_0[grid(4)](buf0, primals_3,
buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0)
del buf0
triton_poi_fused__softmax_add_mul_rsub_1[grid(16)](buf3, primals_3,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_3
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1,
4), 0), buf3, out=buf4)
buf5 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](primals_4, buf4, buf5, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_4
buf6 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
triton_poi_fused_convolution_3[grid(32, 4)](buf5, buf6, 32, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4), (16, 4, 1))
del buf6
buf8 = buf7
del buf7
triton_poi_fused_convolution_4[grid(64)](buf8, primals_6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
return reinterpret_tensor(buf8, (4, 4, 4), (16, 1, 4), 0
), primals_2, primals_5, buf3, reinterpret_tensor(buf5, (4, 8, 4),
(32, 1, 8), 0)
def mask_logits(inputs, mask, mask_value=-1e+30):
mask = mask.type(torch.float32)
return inputs + (1.0 - mask) * mask_value
class Conv1D(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
bias=True):
super(Conv1D, self).__init__()
self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)
def forward(self, x):
x = x.transpose(1, 2)
x = self.conv1d(x)
return x.transpose(1, 2)
class WeightedPool(nn.Module):
def __init__(self, dim):
super(WeightedPool, self).__init__()
weight = torch.empty(dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def forward(self, x, mask):
alpha = torch.tensordot(x, self.weight, dims=1)
alpha = mask_logits(alpha, mask=mask.unsqueeze(2))
alphas = nn.Softmax(dim=1)(alpha)
pooled_x = torch.matmul(x.transpose(1, 2), alphas)
pooled_x = pooled_x.squeeze(2)
return pooled_x
class CQConcatenateNew(nn.Module):
def __init__(self, dim):
super(CQConcatenateNew, self).__init__()
self.weighted_pool = WeightedPool(dim=dim)
self.conv1d = Conv1D(in_dim=2 * dim, out_dim=dim, kernel_size=1,
stride=1, padding=0, bias=True)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weighted_pool.weight
primals_5 = self.conv1d.conv1d.weight
primals_6 = self.conv1d.conv1d.bias
        # In call(), primals_2 is pooled against the learned weight (the query
        # role) and primals_4 fills the first half of the concatenation (the
        # context role), so forward(context, query, q_mask) maps as follows.
        primals_4 = input_0
        primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
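# Hedged smoke test (CUDA assumed), using the argument mapping above:
# context and query are (batch, seq, dim); q_mask is (batch, seq).
cq = CQConcatenateNew(dim=4).cuda()
context = torch.rand(4, 4, 4, device='cuda')
query = torch.rand(4, 4, 4, device='cuda')
q_mask = torch.ones(4, 4, device='cuda')
print(cq(context, query, q_mask).shape)  # torch.Size([4, 4, 4])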
|
EGO4D/episodic-memory
|
CQConcatenate
| false
| 8,073
|
[
"MIT"
] | 27
|
2a3464882cd4f665c358c1b05a6397339e33c2e1
|
https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1
|
FakeReLUM
|
import torch
import torch.utils.data
from torch.utils import data as data
import torch.nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
from torch import autograd as autograd
class FakeReLU(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class FakeReLUM(nn.Module):
def forward(self, x):
return FakeReLU.apply(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch.utils import data as data
import torch.nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
from torch import autograd as autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class FakeReLU(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class FakeReLUMNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
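# Hedged sketch (runs on CPU; only FakeReLUMNew's call() needs CUDA):
# FakeReLU clamps like a ReLU in the forward pass but is an identity in the
# backward pass, so gradients flow even where the input is negative.
x = torch.tensor([-1.0, 2.0], requires_grad=True)
FakeReLU.apply(x).sum().backward()
print(x.grad)  # tensor([1., 1.]) -- a true ReLU would give tensor([0., 1.])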
|
BCV-Uniandes/RSR
|
FakeReLUM
| false
| 8,074
|
[
"zlib-acknowledgement"
] | 14
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
https://github.com/BCV-Uniandes/RSR/tree/dad60eedd3560f2655e3d1ed444153ed2616af2e
|
FeedForward
|
import torch
import torch.nn as nn
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class FeedForward(nn.Module):
def __init__(self, dim, mult=4, dropout=0.0, activation=None, glu=False):
super().__init__()
activation = default(activation, nn.GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class FeedForwardNew(nn.Module):
def __init__(self, dim, mult=4, dropout=0.0, activation=None, glu=False):
super().__init__()
activation = default(activation, nn.GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, input_0):
primals_1 = self.w1.weight
primals_2 = self.w1.bias
primals_4 = self.w2.weight
primals_5 = self.w2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
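# Hedged check (CPU is fine here): triton_poi_fused_gelu_0 spells out the
# exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))), which is what
# nn.GELU() computes by default.
x = torch.randn(8)
manual = 0.5 * x * (1 + torch.erf(x * 0.7071067811865476))
print(torch.allclose(manual, torch.nn.functional.gelu(x), atol=1e-6))  # True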
|
ExpectationMax/Translational-Equivariant-Performers
|
FeedForward
| false
| 8,075
|
[
"MIT"
] | 10
|
c7a55af3b581426512eb4a57d3a13eb20e93fbd6
|
https://github.com/ExpectationMax/Translational-Equivariant-Performers/tree/c7a55af3b581426512eb4a57d3a13eb20e93fbd6
|
FrameMaxPool
|
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
class FrameMaxPool(nn.Module):
def __init__(self, input_size, hidden_size, stride):
super(FrameMaxPool, self).__init__()
self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
self.max_pool = nn.MaxPool1d(stride)
def forward(self, visual_input):
vis_h = torch.relu(self.vis_conv(visual_input))
vis_h = self.max_pool(vis_h)
return vis_h
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'stride': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_0(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.full([1], 0, tl.int8)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp4, xmask)
tl.store(out_ptr2 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.int8)
buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_0[grid
(16)](buf1, primals_2, buf2, buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf3, (4, 4), (4, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf2, buf4
class FrameMaxPoolNew(nn.Module):
def __init__(self, input_size, hidden_size, stride):
super(FrameMaxPoolNew, self).__init__()
self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
self.max_pool = nn.MaxPool1d(stride)
def forward(self, input_0):
primals_1 = self.vis_conv.weight
primals_2 = self.vis_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
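# Hedged smoke test (CUDA assumed): with stride=1 the MaxPool1d has
# kernel_size 1 and is a no-op, so the module reduces to a 1x1 Conv1d
# followed by ReLU; the output is therefore non-negative.
fmp = FrameMaxPoolNew(input_size=4, hidden_size=4, stride=1).cuda()
vis_h = fmp(torch.rand(4, 4, device='cuda'))
print(vis_h.shape, bool((vis_h >= 0).all()))  # torch.Size([4, 4]) True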
|
EGO4D/episodic-memory
|
FrameMaxPool
| false
| 8,076
|
[
"MIT"
] | 27
|
2a3464882cd4f665c358c1b05a6397339e33c2e1
|
https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1
|
FFN
|
import torch
from torch import nn as nn
import torch.nn.functional as F
class FFN(nn.Module):
def __init__(self, d_model, hidden_size=1024):
super().__init__()
self.ln1 = nn.Linear(d_model, hidden_size)
self.ln2 = nn.Linear(hidden_size, d_model)
def reset_params(self):
nn.init.xavier_normal_(self.ln1.weight.data)
nn.init.xavier_normal_(self.ln2.weight.data)
nn.init.constant_(self.ln1.bias.data, 0)
nn.init.constant_(self.ln2.bias.data, 0)
def forward(self, x, x_mas):
x = F.relu(self.ln1(x))
x = self.ln2(x)
return x * x_mas
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (1024, 4), (4, 1))
assert_size_stride(primals_2, (1024,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1024), (1024, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1024), (16384, 4096, 1024,
1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf1,
primals_2, buf4, 65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0
), reinterpret_tensor(primals_4, (1024, 4), (1, 1024), 0), out=buf2
)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_mul_1[grid(256)](buf3, primals_5, primals_6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0), primals_4, buf4
class FFNNew(nn.Module):
def __init__(self, d_model, hidden_size=1024):
super().__init__()
self.ln1 = nn.Linear(d_model, hidden_size)
self.ln2 = nn.Linear(hidden_size, d_model)
def reset_params(self):
nn.init.xavier_normal_(self.ln1.weight.data)
nn.init.xavier_normal_(self.ln2.weight.data)
nn.init.constant_(self.ln1.bias.data, 0)
nn.init.constant_(self.ln2.bias.data, 0)
def forward(self, input_0, input_1):
primals_1 = self.ln1.weight
primals_2 = self.ln1.bias
primals_4 = self.ln2.weight
primals_5 = self.ln2.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
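# Hedged equivalence sketch (illustrative, not part of the generated code):
# kernel 0 above fuses the first bias-add with ReLU (recording the ReLU mask
# in buf4 for the backward pass), and kernel 1 fuses the second bias-add with
# the elementwise mask-multiply. Assuming the eager reference below, FFNNew
# should match it up to float tolerance on CUDA.
def _ffn_reference(m, x, x_mask):
    h = torch.relu(torch.nn.functional.linear(x, m.ln1.weight, m.ln1.bias))
    return torch.nn.functional.linear(h, m.ln2.weight, m.ln2.bias) * x_mask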
|
FFTYYY/RoR_relation_extraction
|
FFN
| false
| 8,077
|
[
"MIT"
] | 25
|
a099e98f3708a39debeed4dc522ff57c4f6b960d
|
https://github.com/FFTYYY/RoR_relation_extraction/tree/a099e98f3708a39debeed4dc522ff57c4f6b960d
|
TransposedConvLayer
|
import torch
from torch.optim import *
import torch.nn as nn
class TransposedConvLayer(nn.Module):
"""
Transposed convolutional layer to increase spatial resolution (x2) in a decoder.
Default: bias, ReLU, no downsampling, no batch norm.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding=0,
activation='relu', norm=None):
super(TransposedConvLayer, self).__init__()
bias = False if norm == 'BN' else True
self.transposed_conv2d = nn.ConvTranspose2d(in_channels,
out_channels, kernel_size, stride=2, padding=padding,
output_padding=1, bias=bias)
if activation is not None:
self.activation = getattr(torch, activation)
else:
self.activation = None
self.norm = norm
if norm == 'BN':
self.norm_layer = nn.BatchNorm2d(out_channels)
elif norm == 'IN':
self.norm_layer = nn.InstanceNorm2d(out_channels,
track_running_stats=True)
def forward(self, x):
out = self.transposed_conv2d(x)
if self.norm in ['BN', 'IN']:
out = self.norm_layer(out)
if self.activation is not None:
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.optim import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 11, 11), (484, 121, 11, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 11, 11), (484, 121, 11, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(1936)](buf1
, primals_2, buf2, 1936, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class TransposedConvLayerNew(nn.Module):
"""
Transposed convolutional layer to increase spatial resolution (x2) in a decoder.
Default: bias, ReLU, no downsampling, no batch norm.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding=0,
activation='relu', norm=None):
super(TransposedConvLayerNew, self).__init__()
bias = False if norm == 'BN' else True
self.transposed_conv2d = nn.ConvTranspose2d(in_channels,
out_channels, kernel_size, stride=2, padding=padding,
output_padding=1, bias=bias)
if activation is not None:
self.activation = getattr(torch, activation)
else:
self.activation = None
self.norm = norm
if norm == 'BN':
self.norm_layer = nn.BatchNorm2d(out_channels)
elif norm == 'IN':
self.norm_layer = nn.InstanceNorm2d(out_channels,
track_running_stats=True)
def forward(self, input_0):
primals_1 = self.transposed_conv2d.weight
primals_2 = self.transposed_conv2d.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
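# Worked size check (illustrative): with the get_init_inputs() config above
# (kernel_size=4, stride=2, padding=0, output_padding=1), ConvTranspose2d maps
#   H_out = (H_in - 1) * stride - 2 * padding + kernel_size + output_padding
#         = (4 - 1) * 2 - 0 + 4 + 1 = 11,
# which is exactly the (4, 4, 11, 11) shape asserted on buf0 in call().
def _transposed_out_size(h_in, kernel_size, stride=2, padding=0, output_padding=1):
    return (h_in - 1) * stride - 2 * padding + kernel_size + output_padding
assert _transposed_out_size(4, 4) == 11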
|
EvilPerfectionist/ssl_e2vid
|
TransposedConvLayer
| false
| 8,078
|
[
"MIT"
] | 24
|
84f7c7e59875f134e97c14ec423f396725e04be7
|
https://github.com/EvilPerfectionist/ssl_e2vid/tree/84f7c7e59875f134e97c14ec423f396725e04be7
|
LavaLoss
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class LavaLoss(nn.Module):
"""
Depth gradient Loss for instance segmentation
"""
def __init__(self):
super(LavaLoss, self).__init__()
pass
def forward(self, seg_masks, gradient_map):
gt_size = gradient_map.shape[1:]
seg_masks = F.interpolate(seg_masks.unsqueeze(0), size=gt_size,
mode='bilinear').squeeze(0)
lava_loss_per_img = seg_masks.mul(gradient_map)
loss = lava_loss_per_img.sum() / (gradient_map.sum() * seg_masks.
shape[0])
return loss
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_sum_0(
in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4 % 4
r0 = rindex % 4
r2 = rindex // 16
r3 = rindex
tmp44 = tl.load(in_ptr1 + r3, None)
tmp0 = r1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1, 1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1, 1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = r0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * r2), None,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * r2), None,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = triton_helpers.minimum(tmp28, tmp4)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * r2), None,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * r2), None,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp9.to(tl.float32)
tmp39 = tmp8 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp7)
tmp41 = triton_helpers.minimum(tmp40, tmp4)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tmp45 = tmp43 * tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp48 = tl.sum(tmp46, 1)[:, None]
tmp49 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 4.0
tmp53 = tmp51 * tmp52
tmp54 = tmp48 / tmp53
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf3
del buf3
get_raw_stream(0)
triton_per_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_sum_0[
grid(1)](buf5, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf5,
class LavaLossNew(nn.Module):
"""
Depth gradient Loss for instance segmentation
"""
def __init__(self):
super(LavaLossNew, self).__init__()
pass
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
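# Hedged note (illustrative): the single fused kernel above inlines the
# bilinear resampling (an exact identity here, since the source and target
# grids are both 4x4 with align_corners=False, so the fractional weights are
# zero), the elementwise product, both sum reductions, and the final division;
# the 4.0 constant (tmp52) is the number of masks. Eager sketch of the same
# computation for matching shapes:
def _lava_reference(seg_masks, gradient_map):
    per_img = seg_masks * gradient_map
    return per_img.sum() / (gradient_map.sum() * seg_masks.shape[0])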
|
EryiXie/PlaneRecNet
|
LavaLoss
| false
| 8,079
|
[
"MIT"
] | 34
|
534e23e6c5db2235ab1e5a9419fb4bfec3ffa943
|
https://github.com/EryiXie/PlaneRecNet/tree/534e23e6c5db2235ab1e5a9419fb4bfec3ffa943
|
Hswish
|
import torch
import torch.nn as nn
import torch.quantization
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.float_op = nn.quantized.FloatFunctional()
self.relu6 = nn.ReLU6(inplace=inplace)
def forward(self, x):
relu6 = self.relu6(self.float_op.add_scalar(x, 3.0))
return self.float_op.mul_scalar(relu6, 1 / 6.0)
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.float_op = nn.quantized.FloatFunctional()
self.hsigmoid = Hsigmoid(inplace)
def forward(self, x):
return self.float_op.mul(x, self.hsigmoid(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.quantization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.float_op = nn.quantized.FloatFunctional()
self.relu6 = nn.ReLU6(inplace=inplace)
def forward(self, x):
relu6 = self.relu6(self.float_op.add_scalar(x, 3.0))
return self.float_op.mul_scalar(relu6, 1 / 6.0)
class HswishNew(nn.Module):
def __init__(self, inplace=True):
super(HswishNew, self).__init__()
self.float_op = nn.quantized.FloatFunctional()
self.hsigmoid = Hsigmoid(inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
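# Hedged sanity sketch: the fused kernel computes hard-swish in one pass,
#   y = x * min(max(x + 3, 0), 6) / 6,
# i.e. x * hsigmoid(x) from the eager module (0.16666... is 1/6). A minimal
# eager reference:
def _hswish_reference(x):
    return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0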
|
Edgecortix-Inc/pytorch_quantization
|
Hswish
| false
| 8,080
|
[
"Apache-2.0"
] | 13
|
ad7120439f473d539adec22930a8363bfb63e830
|
https://github.com/Edgecortix-Inc/pytorch_quantization/tree/ad7120439f473d539adec22930a8363bfb63e830
|
ResidualBlock
|
import torch
from torch.optim import *
import torch.nn as nn
class ResidualBlock(nn.Module):
"""
Residual block as in "Deep residual learning for image recognition", He et al. 2016.
Default: bias, ReLU, no downsampling, no batch norm, ConvLSTM.
"""
def __init__(self, in_channels, out_channels, stride=1, downsample=None,
norm=None, BN_momentum=0.1):
super(ResidualBlock, self).__init__()
bias = False if norm == 'BN' else True
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=bias)
self.norm = norm
if norm == 'BN':
self.bn1 = nn.BatchNorm2d(out_channels, momentum=BN_momentum)
self.bn2 = nn.BatchNorm2d(out_channels, momentum=BN_momentum)
elif norm == 'IN':
self.bn1 = nn.InstanceNorm2d(out_channels)
self.bn2 = nn.InstanceNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=bias)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
if self.norm in ['BN', 'IN']:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.norm in ['BN', 'IN']:
out = self.bn2(out)
if self.downsample:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.optim import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf3, primals_5, primals_1, buf4, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_5
return buf3, primals_1, primals_2, primals_4, buf1, buf4
class ResidualBlockNew(nn.Module):
"""
Residual block as in "Deep residual learning for image recognition", He et al. 2016.
Default: bias, ReLU, no downsampling, no batch norm, ConvLSTM.
"""
def __init__(self, in_channels, out_channels, stride=1, downsample=None,
norm=None, BN_momentum=0.1):
super(ResidualBlockNew, self).__init__()
bias = False if norm == 'BN' else True
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=bias)
self.norm = norm
if norm == 'BN':
self.bn1 = nn.BatchNorm2d(out_channels, momentum=BN_momentum)
self.bn2 = nn.BatchNorm2d(out_channels, momentum=BN_momentum)
elif norm == 'IN':
self.bn1 = nn.InstanceNorm2d(out_channels)
self.bn2 = nn.InstanceNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=bias)
self.downsample = downsample
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
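# Hedged note: this compiled graph corresponds to the norm=None,
# downsample=None configuration from get_init_inputs(), so the BN/IN branches
# were never traced. Kernel 0 fuses conv1's bias-add with ReLU; kernel 1 fuses
# conv2's bias-add, the residual add, and the final ReLU (buf4 keeps the ReLU
# mask for backward). Eager equivalent of call() under those assumptions:
def _resblock_reference(m, x):
    out = torch.relu(m.conv1(x))
    return torch.relu(m.conv2(out) + x)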
|
EvilPerfectionist/ssl_e2vid
|
ResidualBlock
| false
| 8,081
|
[
"MIT"
] | 24
|
84f7c7e59875f134e97c14ec423f396725e04be7
|
https://github.com/EvilPerfectionist/ssl_e2vid/tree/84f7c7e59875f134e97c14ec423f396725e04be7
|
BahdanauAttn
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BahdanauAttn(nn.Module):
def __init__(self, context_size, hidden_size):
super(BahdanauAttn, self).__init__()
self.hidden_size = hidden_size
self.context_size = context_size
self.attn_h = nn.Linear(self.hidden_size, self.context_size, bias=False
)
self.attn_e = nn.Linear(self.context_size, self.context_size, bias=
False)
self.v = nn.Parameter(torch.rand(self.context_size))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
self.softmax = nn.Softmax(dim=1)
def forward(self, hidden, encoder_outputs, ctx_mask=None):
"""
:param hidden:
previous hidden state of the decoder, in shape (1,B,H)
:param encoder_outputs:
encoder outputs from Encoder, in shape (S,B,C)
:return
attention energies in shape (B,S)
"""
max_len = encoder_outputs.size(0)
encoder_outputs.size(1)
H = hidden.repeat(max_len, 1, 1).transpose(0, 1)
encoder_outputs = encoder_outputs.transpose(0, 1)
attn_energies = self.score(H, encoder_outputs)
if ctx_mask is not None:
            self.mask = (1 - ctx_mask.transpose(0, 1).data).bool()
attn_energies.data.masked_fill_(self.mask, -float('inf'))
return self.softmax(attn_energies).unsqueeze(1)
def score(self, hidden, encoder_outputs):
        energy = torch.tanh(self.attn_h(hidden) + self.attn_e(encoder_outputs))
energy = energy.transpose(2, 1)
v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)
energy = torch.bmm(v, energy)
return energy.squeeze(1)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'context_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_2, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(64)](buf3, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 0, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf6
return reinterpret_tensor(buf7, (4, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), buf3, buf7, reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 4), 0)
class BahdanauAttnNew(nn.Module):
def __init__(self, context_size, hidden_size):
super(BahdanauAttnNew, self).__init__()
self.hidden_size = hidden_size
self.context_size = context_size
self.attn_h = nn.Linear(self.hidden_size, self.context_size, bias=False
)
self.attn_e = nn.Linear(self.context_size, self.context_size, bias=
False)
self.v = nn.Parameter(torch.rand(self.context_size))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
self.softmax = nn.Softmax(dim=1)
def score(self, hidden, encoder_outputs):
        energy = torch.tanh(self.attn_h(hidden) + self.attn_e(encoder_outputs))
energy = energy.transpose(2, 1)
v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)
energy = torch.bmm(v, energy)
return energy.squeeze(1)
def forward(self, input_0, input_1):
primals_5 = self.v
primals_1 = self.attn_h.weight
primals_2 = self.attn_e.weight
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
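# Hedged sketch of the additive-attention math the kernels implement:
#   score(h, e) = v^T tanh(attn_h(h) + attn_e(e)),  then softmax over the
# source dimension (the two _softmax kernels are the usual max-shifted
# exponentiate / sum-normalize pair). Eager reference for already-aligned
# (B, S, *) inputs:
def _additive_score(m, hidden, encoder_outputs):
    energy = torch.tanh(m.attn_h(hidden) + m.attn_e(encoder_outputs))
    return torch.einsum('c,bsc->bs', m.v, energy)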
|
Eurus-Holmes/VAG-NMT
|
BahdanauAttn
| false
| 8,082
|
[
"Apache-2.0"
] | 12
|
38095c4a5477a0e7e2fa1592e8401aa9cddf2beb
|
https://github.com/Eurus-Holmes/VAG-NMT/tree/38095c4a5477a0e7e2fa1592e8401aa9cddf2beb
|
CharbonnierLoss
|
import functools
import torch
import torch.utils.data
from torch.nn import functional as F
from torch.utils import data as data
import torch.nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
from torch import autograd as autograd
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are 'none', 'mean' and 'sum'.
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean'):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
'none', 'mean' and 'sum'. Default: 'mean'.
Returns:
Tensor: Loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) > 1:
weight = weight.sum()
else:
weight = weight.sum() * loss.size(1)
loss = loss.sum() / weight
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
**kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction)
return loss
return wrapper
@weighted_loss
def charbonnier_loss(pred, target, eps=1e-12):
return torch.sqrt((pred - target) ** 2 + eps)
class CharbonnierLoss(nn.Module):
"""Charbonnier loss (one variant of Robust L1Loss, a differentiable
variant of L1Loss).
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
Super-Resolution".
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12):
super(CharbonnierLoss, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f'Unsupported reduction mode: {reduction}. Supported ones are: none | mean | sum'
)
self.loss_weight = loss_weight
self.reduction = reduction
self.eps = eps
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise
weights. Default: None.
"""
return self.loss_weight * charbonnier_loss(pred, target, weight,
eps=self.eps, reduction=self.reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import functools
import torch.utils.data
from torch.nn import functional as F
from torch.utils import data as data
import torch.nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
from torch import autograd as autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-12
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tmp12 = 1.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are 'none', 'mean' and 'sum'.
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean'):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
'none', 'mean' and 'sum'. Default: 'mean'.
Returns:
Tensor: Loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) > 1:
weight = weight.sum()
else:
weight = weight.sum() * loss.size(1)
loss = loss.sum() / weight
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
**kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction)
return loss
return wrapper
@weighted_loss
def charbonnier_loss(pred, target, eps=1e-12):
return torch.sqrt((pred - target) ** 2 + eps)
class CharbonnierLossNew(nn.Module):
"""Charbonnier loss (one variant of Robust L1Loss, a differentiable
variant of L1Loss).
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
Super-Resolution".
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12):
super(CharbonnierLossNew, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f'Unsupported reduction mode: {reduction}. Supported ones are: none | mean | sum'
)
self.loss_weight = loss_weight
self.reduction = reduction
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
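# Hedged sanity sketch: for the default weight=None, reduction='mean' path,
# the fused kernel reduces all 256 elements in one pass:
#   loss = loss_weight * mean(sqrt((pred - target)^2 + eps)),
# where the 256.0 divisor (tmp10) is numel and the 1.0 multiplier (tmp12) is
# apparently the constant-folded default loss_weight. Eager reference:
def _charbonnier_reference(pred, target, eps=1e-12, loss_weight=1.0):
    return loss_weight * torch.sqrt((pred - target) ** 2 + eps).mean()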
|
BCV-Uniandes/RSR
|
CharbonnierLoss
| false
| 8,083
|
[
"zlib-acknowledgement"
] | 14
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
https://github.com/BCV-Uniandes/RSR/tree/dad60eedd3560f2655e3d1ed444153ed2616af2e
|