entry_point
stringlengths 1
65
| original_triton_python_code
stringlengths 208
619k
| optimised_triton_code
stringlengths 1.15k
275k
| repo_name
stringlengths 7
115
| module_name
stringlengths 1
65
| synthetic
bool 1
class | uuid
int64 0
18.5k
| licenses
listlengths 1
6
| stars
int64 0
19.8k
| sha
stringlengths 40
40
| repo_link
stringlengths 72
180
|
|---|---|---|---|---|---|---|---|---|---|---|
CrossEntropyLoss
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
def _is_long(x):
if hasattr(x, 'data'):
x = x.data
return isinstance(x, torch.LongTensor) or isinstance(x, torch.LongTensor)
def cross_entropy(inputs, target, weight=None, ignore_index=-100,
                  reduction='mean', smooth_eps=None, smooth_dist=None,
                  from_logits=True):
    """Cross entropy loss with support for soft-target distributions and
    label smoothing (https://arxiv.org/abs/1512.00567).

    Args:
        inputs: logits (or log-probabilities when ``from_logits=False``),
            class dimension last.
        target: class-index long tensor, or a float distribution over the
            class dimension.
        weight: optional per-class rescaling weights.
        ignore_index: positions equal to this index contribute zero loss
            (only honored for long targets with ``ignore_index >= 0``,
            beyond the built-in fast path).
        reduction: 'none' | 'sum' | 'mean'.
        smooth_eps: label-smoothing factor; 0/None disables smoothing.
        smooth_dist: optional distribution to smooth the targets towards.
        from_logits: apply ``log_softmax`` to ``inputs`` when True.

    Returns:
        The (reduced) loss tensor.
    """

    def _target_is_long(t):
        # Unwrap legacy Variable-style ``.data`` before the dtype check;
        # the dtype test covers both CPU and CUDA long tensors.
        if hasattr(t, 'data'):
            t = t.data
        return isinstance(t, torch.Tensor) and t.dtype == torch.long

    smooth_eps = smooth_eps or 0
    # Fast path: hard targets with no smoothing delegate to the fused
    # built-in implementations.
    if _target_is_long(target) and smooth_eps == 0:
        if from_logits:
            return F.cross_entropy(inputs, target, weight,
                                   ignore_index=ignore_index,
                                   reduction=reduction)
        return F.nll_loss(inputs, target, weight,
                          ignore_index=ignore_index, reduction=reduction)
    lsm = F.log_softmax(inputs, dim=-1) if from_logits else inputs
    masked_indices = None
    num_classes = inputs.size(-1)
    if _target_is_long(target) and ignore_index >= 0:
        masked_indices = target.eq(ignore_index)
    if smooth_eps > 0 and smooth_dist is not None:
        if _target_is_long(target):
            # BUG FIX: the original called an undefined ``onehot`` helper
            # here, raising NameError whenever a long target was smoothed
            # towards a distribution; F.one_hot is the stdlib equivalent.
            target = F.one_hot(target, num_classes).type_as(inputs)
        if smooth_dist.dim() < target.dim():
            smooth_dist = smooth_dist.unsqueeze(0)
        # NOTE(review): in-place lerp mutates a float ``target`` supplied
        # by the caller — behavior preserved from the original.
        target.lerp_(smooth_dist, smooth_eps)
    if weight is not None:
        lsm = lsm * weight.unsqueeze(0)
    if _target_is_long(target):
        # Hard targets with uniform smoothing, folded analytically:
        # loss = -((1 - eps - eps/C) * log p_y + (eps/C) * sum_j log p_j).
        eps_sum = smooth_eps / num_classes
        eps_nll = 1.0 - eps_sum - smooth_eps
        likelihood = lsm.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
        loss = -(eps_nll * likelihood + eps_sum * lsm.sum(-1))
    else:
        # Soft targets: plain cross entropy against the distribution.
        loss = -(target * lsm).sum(-1)
    if masked_indices is not None:
        loss.masked_fill_(masked_indices, 0)
    if reduction == 'sum':
        loss = loss.sum()
    elif reduction == 'mean':
        if masked_indices is None:
            loss = loss.mean()
        else:
            # Average only over the non-ignored positions.
            loss = loss.sum() / float(loss.size(0) - masked_indices.sum())
    return loss
class CrossEntropyLoss(nn.CrossEntropyLoss):
    """Cross-entropy criterion that also accepts distribution targets and
    supports optional label smoothing."""

    def __init__(self, weight=None, ignore_index=-100, reduction='mean',
                 smooth_eps=None, smooth_dist=None, from_logits=True):
        super(CrossEntropyLoss, self).__init__(
            weight=weight, ignore_index=ignore_index, reduction=reduction)
        # Smoothing configuration, consumed by ``cross_entropy`` per call.
        self.smooth_eps = smooth_eps
        self.smooth_dist = smooth_dist
        self.from_logits = from_logits

    def forward(self, input, target, smooth_dist=None):
        # A per-call smoothing distribution overrides the configured one.
        dist = self.smooth_dist if smooth_dist is None else smooth_dist
        return cross_entropy(
            input, target,
            weight=self.weight,
            ignore_index=self.ignore_index,
            reduction=self.reduction,
            smooth_eps=self.smooth_eps,
            smooth_dist=dist,
            from_logits=self.from_logits)
def get_inputs():
    """Example forward-pass inputs: two random (4, 4, 4, 4) tensors."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(2)]


def get_init_inputs():
    """Example constructor arguments: no positionals, no keywords."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # First stage of a numerically stable log_softmax over the last dim
    # (size 4): writes x - max(row); the log-sum-exp normalization is
    # finished by the follow-up kernel.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex  # flat element index
    x1 = xindex // 4  # row index (rows of 4 classes)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Load all 4 elements of this row to compute the row maximum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7  # subtract the row max for numerical stability
    tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Second stage fused with the loss reduction. in_ptr1 holds the
    # max-shifted logits, in_ptr0 the target distribution. Per row of 4
    # classes: loss = -sum(target * (shifted - log(sum(exp(shifted)))));
    # the 64 row losses are then summed and divided by 64 (mean).
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    # log-sum-exp denominator over the 4 shifted logits of the row.
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    # Accumulate target_j * log_softmax_j over the 4 classes.
    tmp13 = tmp1 - tmp12
    tmp14 = tmp0 * tmp13
    tmp16 = tmp3 - tmp12
    tmp17 = tmp15 * tmp16
    tmp18 = tmp14 + tmp17
    tmp20 = tmp6 - tmp12
    tmp21 = tmp19 * tmp20
    tmp22 = tmp18 + tmp21
    tmp24 = tmp9 - tmp12
    tmp25 = tmp23 * tmp24
    tmp26 = tmp22 + tmp25
    tmp27 = -tmp26  # negate: cross entropy
    tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
    tmp30 = tl.sum(tmp28, 1)[:, None]
    tmp31 = 64.0
    tmp32 = tmp30 / tmp31  # mean over the 64 rows
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
def call(args):
    # Compiled graph: log_softmax(arg0) over the last dim, multiplied by
    # the target distribution arg1, negated and mean-reduced to a scalar.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: max-shifted logits (stage 1 of log_softmax).
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        # buf2: scalar mean loss written by the fused reduction kernel.
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2,
            arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf0
    return buf2,
def _is_long(x):
if hasattr(x, 'data'):
x = x.data
return isinstance(x, torch.LongTensor) or isinstance(x, torch.LongTensor)
def cross_entropy(inputs, target, weight=None, ignore_index=-100,
                  reduction='mean', smooth_eps=None, smooth_dist=None,
                  from_logits=True):
    """Cross entropy loss with support for soft-target distributions and
    label smoothing (https://arxiv.org/abs/1512.00567).

    Args:
        inputs: logits (or log-probabilities when ``from_logits=False``),
            class dimension last.
        target: class-index long tensor, or a float distribution over the
            class dimension.
        weight: optional per-class rescaling weights.
        ignore_index: positions equal to this index contribute zero loss
            (only honored for long targets with ``ignore_index >= 0``,
            beyond the built-in fast path).
        reduction: 'none' | 'sum' | 'mean'.
        smooth_eps: label-smoothing factor; 0/None disables smoothing.
        smooth_dist: optional distribution to smooth the targets towards.
        from_logits: apply ``log_softmax`` to ``inputs`` when True.

    Returns:
        The (reduced) loss tensor.
    """

    def _target_is_long(t):
        # Unwrap legacy Variable-style ``.data`` before the dtype check;
        # the dtype test covers both CPU and CUDA long tensors.
        if hasattr(t, 'data'):
            t = t.data
        return isinstance(t, torch.Tensor) and t.dtype == torch.long

    smooth_eps = smooth_eps or 0
    # Fast path: hard targets with no smoothing delegate to the fused
    # built-in implementations.
    if _target_is_long(target) and smooth_eps == 0:
        if from_logits:
            return F.cross_entropy(inputs, target, weight,
                                   ignore_index=ignore_index,
                                   reduction=reduction)
        return F.nll_loss(inputs, target, weight,
                          ignore_index=ignore_index, reduction=reduction)
    lsm = F.log_softmax(inputs, dim=-1) if from_logits else inputs
    masked_indices = None
    num_classes = inputs.size(-1)
    if _target_is_long(target) and ignore_index >= 0:
        masked_indices = target.eq(ignore_index)
    if smooth_eps > 0 and smooth_dist is not None:
        if _target_is_long(target):
            # BUG FIX: the original called an undefined ``onehot`` helper
            # here, raising NameError whenever a long target was smoothed
            # towards a distribution; F.one_hot is the stdlib equivalent.
            target = F.one_hot(target, num_classes).type_as(inputs)
        if smooth_dist.dim() < target.dim():
            smooth_dist = smooth_dist.unsqueeze(0)
        # NOTE(review): in-place lerp mutates a float ``target`` supplied
        # by the caller — behavior preserved from the original.
        target.lerp_(smooth_dist, smooth_eps)
    if weight is not None:
        lsm = lsm * weight.unsqueeze(0)
    if _target_is_long(target):
        # Hard targets with uniform smoothing, folded analytically:
        # loss = -((1 - eps - eps/C) * log p_y + (eps/C) * sum_j log p_j).
        eps_sum = smooth_eps / num_classes
        eps_nll = 1.0 - eps_sum - smooth_eps
        likelihood = lsm.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
        loss = -(eps_nll * likelihood + eps_sum * lsm.sum(-1))
    else:
        # Soft targets: plain cross entropy against the distribution.
        loss = -(target * lsm).sum(-1)
    if masked_indices is not None:
        loss.masked_fill_(masked_indices, 0)
    if reduction == 'sum':
        loss = loss.sum()
    elif reduction == 'mean':
        if masked_indices is None:
            loss = loss.mean()
        else:
            # Average only over the non-ignored positions.
            loss = loss.sum() / float(loss.size(0) - masked_indices.sum())
    return loss
class CrossEntropyLossNew(nn.CrossEntropyLoss):
    """Inductor-compiled counterpart of CrossEntropyLoss: ``forward``
    feeds both inputs to the generated Triton kernel graph in ``call``."""

    def __init__(self, weight=None, ignore_index=-100, reduction='mean',
                 smooth_eps=None, smooth_dist=None, from_logits=True):
        super(CrossEntropyLossNew, self).__init__(
            weight=weight, ignore_index=ignore_index, reduction=reduction)
        # Stored for interface parity with the eager module; the compiled
        # graph itself is specialized to the traced configuration.
        self.smooth_eps = smooth_eps
        self.smooth_dist = smooth_dist
        self.from_logits = from_logits

    def forward(self, input_0, input_1):
        # ``call`` returns a 1-tuple holding the scalar loss.
        result = call([input_0, input_1])
        return result[0]
|
MutualMarkets/gap
|
CrossEntropyLoss
| false
| 8,589
|
[
"MIT"
] | 29
|
328b0b7bee1aad8738ddb0f94b4fe49b2e250034
|
https://github.com/MutualMarkets/gap/tree/328b0b7bee1aad8738ddb0f94b4fe49b2e250034
|
DaiNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DaiNet(nn.Module):
    """Small CNN for 32x32 RGB inputs (e.g. CIFAR): two conv+pool stages
    followed by three fully connected layers producing 10 logits."""

    def __init__(self):
        super(DaiNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 12, 5)
        # FIX: the original assigned ``self.dp`` twice; the second
        # assignment overwrote the first, so one Dropout suffices.
        # (It is never applied in forward anyway.)
        self.dp = nn.Dropout(0.5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(12, 24, 3)
        self.fc1 = nn.Linear(24 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return class logits of shape (N, 10) for input (N, 3, 32, 32)."""
        x = self.pool(F.relu(self.conv1(x)))  # -> (N, 12, 14, 14)
        x = self.pool(F.relu(self.conv2(x)))  # -> (N, 24, 6, 6)
        x = x.view(-1, 24 * 6 * 6)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
def get_inputs():
    """Example forward input: one random (4, 3, 32, 32) image batch."""
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    """Example constructor arguments: no positionals, no keywords."""
    return [list(), dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU on the first conv output:
    # in_out_ptr0[i] = max(0, in_out_ptr0[i] + bias[channel(i)]).
    xnumel = 37632
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 12  # channel index: 12 maps of 28*28 = 784
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add the per-channel bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pooling over 28x28 maps: out_ptr0 receives the
    # window max, out_ptr1 the int8 argmax position within the window
    # (0..3, row-major).
    xnumel = 9408
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14  # output column
    x3 = xindex // 14
    x2 = xindex // 2352  # batch index
    x4 = xindex % 2352
    # The four elements of the 2x2 window (row stride 28 -> 56 for 2 rows).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which of the four window slots won (for the backward pass).
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x4 + 2368 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x4 + 2432 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU on the second conv output:
    # in_out_ptr0[i] = max(0, in_out_ptr0[i] + bias[channel(i)]).
    xnumel = 13824
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 144 % 24  # channel index: 24 maps of 12*12 = 144
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add the per-channel bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pooling over 12x12 maps. Note the outputs are
    # swapped vs. kernel _1: out_ptr0 receives the int8 argmax index and
    # out_ptr1 the max value (matches buf6=int8 / buf7=float at the call
    # site).
    xnumel = 3456
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6  # output column
    x1 = xindex // 6
    x2 = xindex
    # The four elements of the 2x2 window (row stride 12 -> 24 for 2 rows).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 24 * x1), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 24 * x1), xmask, eviction_policy
        ='evict_last')
    tmp7 = tl.load(in_ptr0 + (12 + 2 * x0 + 24 * x1), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (13 + 2 * x0 + 24 * x1), xmask,
        eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU on the fc1 output (4 x 120 = 480 values).
    xnumel = 480
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 120  # bias index within the 120 fc1 units
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU on the fc2 output (4 x 84 = 336 values).
    xnumel = 336
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 84  # bias index within the 84 fc2 units
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    # Compiled DaiNet forward: conv1 -> ReLU -> maxpool -> conv2 -> ReLU
    # -> maxpool -> fc1 -> ReLU -> fc2 -> ReLU -> fc3. Extra buffers are
    # returned for use by the autograd backward graph.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (12, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
    assert_size_stride(primals_4, (24, 12, 3, 3), (108, 9, 3, 1))
    assert_size_stride(primals_5, (24,), (1,))
    assert_size_stride(primals_6, (120, 864), (864, 1))
    assert_size_stride(primals_7, (120,), (1,))
    assert_size_stride(primals_8, (84, 120), (120, 1))
    assert_size_stride(primals_9, (84,), (1,))
    assert_size_stride(primals_10, (10, 84), (84, 1))
    assert_size_stride(primals_11, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Stage 1: conv1 (bias applied by the fused Triton kernel below).
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 12, 28, 28), (9408, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(37632)](buf1, primals_2,
            37632, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        # Stage 2: 2x2 max pool (values in buf2, argmax indices in buf3).
        buf2 = empty_strided_cuda((4, 12, 14, 14), (2368, 196, 14, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 12, 14, 14), (2432, 196, 14, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(9408)](buf1, buf2,
            buf3, 9408, XBLOCK=128, num_warps=4, num_stages=1)
        # Stage 3: conv2 + fused bias/ReLU.
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 24, 12, 12), (3456, 144, 12, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(13824)](buf5, primals_5,
            13824, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        # Stage 4: second max pool (indices in buf6, values in buf7).
        buf6 = empty_strided_cuda((4, 24, 6, 6), (864, 36, 6, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 24, 6, 6), (864, 36, 6, 1), torch.float32
            )
        triton_poi_fused_max_pool2d_with_indices_3[grid(3456)](buf5, buf6,
            buf7, 3456, XBLOCK=128, num_warps=4, num_stages=1)
        # Stage 5: fc1 (matmul, then fused bias/ReLU).
        buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (4, 864), (864, 1), 0),
            reinterpret_tensor(primals_6, (864, 120), (1, 864), 0), out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_7
        # Stage 6: fc2 (matmul, then fused bias/ReLU).
        buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
        extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
            120), 0), out=buf10)
        buf11 = buf10
        del buf10
        triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_9
        # Stage 7: fc3 with fused bias (addmm) producing the 10 logits.
        buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
            primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12)
        del primals_11
    return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
        buf6, reinterpret_tensor(buf7, (4, 864), (864, 1), 0), buf9, buf11,
        primals_10, primals_8, primals_6)
class DaiNetNew(nn.Module):
    """Inductor-compiled DaiNet: forward runs the generated Triton/extern
    kernel graph in ``call`` instead of eager PyTorch ops."""
    def __init__(self):
        super(DaiNetNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 12, 5)
        self.dp = nn.Dropout(0.5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(12, 24, 3)
        # NOTE(review): self.dp is assigned twice; this second Dropout
        # overwrites the first and neither is used by the compiled forward.
        self.dp = nn.Dropout(0.5)
        self.fc1 = nn.Linear(24 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
    def forward(self, input_0):
        # Gather weights/biases in the positional order expected by call().
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_10 = self.fc3.weight
        primals_11 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        # call() returns (logits, *saved_buffers); expose only the logits.
        return output[0]
|
MaxChanger/pytorch-cifar
|
DaiNet
| false
| 8,590
|
[
"MIT"
] | 20
|
217fd2cf7e603fe9a8d3d97f2085606bc43a356a
|
https://github.com/MaxChanger/pytorch-cifar/tree/217fd2cf7e603fe9a8d3d97f2085606bc43a356a
|
LayerNormGRUCell
|
import math
import torch
class LayerNormGRUCell(torch.nn.Module):
    """GRU cell with layer normalization applied to the fused gate
    pre-activations and to both halves of the candidate state."""

    def __init__(self, input_size, hidden_size, bias=True):
        super(LayerNormGRUCell, self).__init__()
        # Normalizers for the fused (z, r) gate pre-activations.
        self.ln_i2h = torch.nn.LayerNorm(2 * hidden_size,
            elementwise_affine=False)
        self.ln_h2h = torch.nn.LayerNorm(2 * hidden_size,
            elementwise_affine=False)
        # Normalizers for the two halves of the candidate state.
        self.ln_cell_1 = torch.nn.LayerNorm(hidden_size, elementwise_affine
            =False)
        self.ln_cell_2 = torch.nn.LayerNorm(hidden_size, elementwise_affine
            =False)
        self.i2h = torch.nn.Linear(input_size, 2 * hidden_size, bias=bias)
        self.h2h = torch.nn.Linear(hidden_size, 2 * hidden_size, bias=bias)
        self.h_hat_W = torch.nn.Linear(input_size, hidden_size, bias=bias)
        self.h_hat_U = torch.nn.Linear(hidden_size, hidden_size, bias=bias)
        self.hidden_size = hidden_size
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)] for all parameters."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, x, h):
        """One step: return the next hidden state of shape (B, H).

        FIX: removed the no-op ``h = h`` statement and the redundant
        full-slice ``preact[:, :]`` of the original; logic is unchanged.
        """
        h = h.view(h.size(0), -1)
        x = x.view(x.size(0), -1)
        # Normalized gate pre-activations from the input and hidden paths.
        i2h = self.ln_i2h(self.i2h(x))
        h2h = self.ln_h2h(self.h2h(h))
        gates = (i2h + h2h).sigmoid()
        z_t = gates[:, :self.hidden_size]   # update gate
        r_t = gates[:, -self.hidden_size:]  # reset gate
        # Candidate state: the reset gate scales only the hidden half.
        h_hat_first_half = self.ln_cell_1(self.h_hat_W(x))
        h_hat_last_half = self.ln_cell_2(self.h_hat_U(h))
        h_hat = torch.tanh(h_hat_first_half + torch.mul(r_t, h_hat_last_half))
        h_t = torch.mul(1 - z_t, h) + torch.mul(z_t, h_hat)
        return h_t.view(h_t.size(0), -1)
def get_inputs():
    """Example forward inputs: random (x, h) pairs of shape (4, 4)."""
    return [torch.rand([4, 4]) for _ in range(2)]


def get_init_inputs():
    """Constructor args: no positionals, keyword sizes for the cell."""
    kwargs = dict(input_size=4, hidden_size=4)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1,
    out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused gates = sigmoid(LN(i2h) + LN(h2h)): per row of 8 values,
    # compute mean/variance of both inputs, normalize each (eps 1e-05),
    # add, and apply the sigmoid.
    xnumel = 4
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)
    tmp17 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0)
    # Mean and variance of the first input's row.
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 8, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    # Mean and variance of the second input's row.
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp18, 0)
    tmp21 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
    tmp23 = tl.where(xmask, tmp21, 0)
    tmp24 = tl.sum(tmp23, 1)[:, None]
    tmp25 = tmp24 / tmp9
    tmp26 = tmp18 - tmp25
    tmp27 = tmp26 * tmp26
    tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
    tmp30 = tl.where(xmask, tmp28, 0)
    tmp31 = tl.sum(tmp30, 1)[:, None]
    # Normalize both, add, sigmoid.
    tmp32 = tmp0 - tmp10
    tmp33 = 8.0
    tmp34 = tmp16 / tmp33
    tmp35 = 1e-05
    tmp36 = tmp34 + tmp35
    tmp37 = libdevice.rsqrt(tmp36)
    tmp38 = tmp32 * tmp37
    tmp39 = tmp17 - tmp25
    tmp40 = tmp31 / tmp33
    tmp41 = tmp40 + tmp35
    tmp42 = libdevice.rsqrt(tmp41)
    tmp43 = tmp39 * tmp42
    tmp44 = tmp38 + tmp43
    tmp45 = tl.sigmoid(tmp44)
    tl.store(out_ptr4 + (r1 + 8 * x0), tmp45, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Per-row (length 4) LayerNorm statistics: writes the row mean to
    # out_ptr0 and rsqrt(variance + 1e-05) to out_ptr1.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7  # row mean
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7  # biased variance
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_rsub_tanh_2(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused GRU update: h_t = (1 - z) * h + z * tanh(LN(Wx) + r * LN(Uh)),
    # using the per-row mean/rstd statistics precomputed by kernel _1.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)  # z gate (first half)
    tmp3 = tl.load(in_ptr1 + x2, xmask)  # previous hidden state h
    tmp5 = tl.load(in_ptr2 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)  # r gate (2nd half)
    tmp11 = tl.load(in_ptr5 + x2, xmask)
    tmp12 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3  # (1 - z) * h
    tmp7 = tmp5 - tmp6
    tmp9 = tmp7 * tmp8  # normalize input half: (x - mean) * rstd
    tmp13 = tmp11 - tmp12
    tmp15 = tmp13 * tmp14  # normalize hidden half
    tmp16 = tmp10 * tmp15  # r * LN(Uh)
    tmp17 = tmp9 + tmp16
    tmp18 = libdevice.tanh(tmp17)  # candidate state h_hat
    tmp19 = tmp0 * tmp18  # z * h_hat
    tmp20 = tmp4 + tmp19
    tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
    # Compiled LayerNormGRUCell step: four addmm projections, one fused
    # gate kernel, two LayerNorm-statistics kernels, and one fused update
    # kernel producing the next hidden state (buf15).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (8, 4), (4, 1))
    assert_size_stride(primals_4, (8,), (1,))
    assert_size_stride(primals_5, (8, 4), (4, 1))
    assert_size_stride(primals_6, (8,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0/buf1: i2h- and h2h-style projections (weight^T via
        # reinterpret_tensor, bias fused through addmm's beta term).
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.addmm(primals_4, primals_2, reinterpret_tensor(
            primals_3, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_3
        del primals_4
        buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.addmm(primals_6, primals_1, reinterpret_tensor(
            primals_5, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        # buf8: fused gates = sigmoid(LN(buf0) + LN(buf1)).
        buf8 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_native_layer_norm_sigmoid_0[grid(4)](buf0,
            buf1, buf8, 4, 8, XBLOCK=1, num_warps=2, num_stages=1)
        # buf9/buf10: candidate-state projections.
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, primals_2, reinterpret_tensor(
            primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
        del primals_7
        del primals_8
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_10, primals_1, reinterpret_tensor(
            primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
        del primals_10
        del primals_9
        # buf11..buf14: per-row mean and rstd for both projections.
        buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(4)](buf9, buf11, buf12, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(4)](buf10, buf13, buf14,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        # buf15: fused h_t = (1 - z) * h + z * tanh(LN(Wx) + r * LN(Uh)).
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_native_layer_norm_rsub_tanh_2[grid(16)](buf8,
            primals_1, buf9, buf11, buf12, buf10, buf13, buf14, buf15, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf11
        del buf12
        del buf13
        del buf14
        del buf8
    return buf15, primals_1, primals_2, buf0, buf1, buf9, buf10
class LayerNormGRUCellNew(torch.nn.Module):
    """Inductor-compiled LayerNormGRUCell: forward dispatches to the
    fused Triton kernel graph in ``call``."""
    def __init__(self, input_size, hidden_size, bias=True):
        super(LayerNormGRUCellNew, self).__init__()
        self.ln_i2h = torch.nn.LayerNorm(2 * hidden_size,
            elementwise_affine=False)
        self.ln_h2h = torch.nn.LayerNorm(2 * hidden_size,
            elementwise_affine=False)
        self.ln_cell_1 = torch.nn.LayerNorm(hidden_size, elementwise_affine
            =False)
        self.ln_cell_2 = torch.nn.LayerNorm(hidden_size, elementwise_affine
            =False)
        self.i2h = torch.nn.Linear(input_size, 2 * hidden_size, bias=bias)
        self.h2h = torch.nn.Linear(hidden_size, 2 * hidden_size, bias=bias)
        self.h_hat_W = torch.nn.Linear(input_size, hidden_size, bias=bias)
        self.h_hat_U = torch.nn.Linear(hidden_size, hidden_size, bias=bias)
        self.hidden_size = hidden_size
        self.reset_parameters()
    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)] for all parameters."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)
    def forward(self, input_0, input_1):
        # Map parameters and inputs to the positional slots of call().
        primals_3 = self.i2h.weight
        primals_4 = self.i2h.bias
        primals_5 = self.h2h.weight
        primals_6 = self.h2h.bias
        primals_1 = self.h_hat_W.weight
        primals_8 = self.h_hat_W.bias
        primals_2 = self.h_hat_U.weight
        primals_10 = self.h_hat_U.bias
        primals_7 = input_0
        primals_9 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
        # call() returns (h_t, *saved_buffers); expose only the new state.
        return output[0]
|
NeuroAI-PI/AI-Grand-Challenge-2021
|
LayerNormGRUCell
| false
| 8,591
|
[
"MIT"
] | 21
|
aed2c31ce90cafe15895a11fadb9d88abd0c8765
|
https://github.com/NeuroAI-PI/AI-Grand-Challenge-2021/tree/aed2c31ce90cafe15895a11fadb9d88abd0c8765
|
PositionalEncoding
|
import torch
import torch.nn as nn
import torch.optim
import torch.nn.init
class PositionalEncoding(nn.Module):
    """Adds a learned positional-embedding parameter of shape
    (emb_size, spatial_size) to the input tensor."""
    def __init__(self, emb_size: 'int', spatial_size: 'int'):
        super(PositionalEncoding, self).__init__()
        self.emb_size = emb_size
        self.spatial_size = spatial_size
        # Learned positions; broadcast over the input's leading dims.
        self.positions = nn.Parameter(torch.randn(self.emb_size, self.
            spatial_size))
    def forward(self, x):
        # NOTE(review): the in-place += mutates the caller's tensor and
        # fails on leaf tensors requiring grad — confirm this is intended.
        x += self.positions
        return x
def get_inputs():
    """Example forward input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor args: no positionals, keyword sizes for the module."""
    kwargs = dict(emb_size=4, spatial_size=4)
    return [[], kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Broadcast add: out[i] = x[i] + positions[i % 16]; the 16-element
    # (4 x 4) positions parameter repeats over the 256-element input.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16  # index into the flattened positions parameter
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
    # Compiled PositionalEncoding forward: broadcast-add the (4, 4)
    # positions parameter over the (4, 4, 4, 4) input, out of place.
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    # The result is returned twice (output plus saved-for-backward slot).
    return buf0, buf0
class PositionalEncodingNew(nn.Module):
    """Inductor-compiled PositionalEncoding: the broadcast add runs in a
    Triton kernel via ``call`` (out of place, unlike the eager +=)."""
    def __init__(self, emb_size: 'int', spatial_size: 'int'):
        super(PositionalEncodingNew, self).__init__()
        self.emb_size = emb_size
        self.spatial_size = spatial_size
        # Learned positions added (broadcast) to the input by the kernel.
        self.positions = nn.Parameter(torch.randn(self.emb_size, self.
            spatial_size))
    def forward(self, input_0):
        primals_1 = self.positions
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
|
NimrodShabtay/transformers-dip
|
PositionalEncoding
| false
| 8,592
|
[
"MIT"
] | 25
|
61bc3008114ca950e7ea6341ae8ff317d9353f40
|
https://github.com/NimrodShabtay/transformers-dip/tree/61bc3008114ca950e7ea6341ae8ff317d9353f40
|
Multi_Head_Attention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T [* scale]) V."""

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: optional scaling factor; the paper uses 1/sqrt(dim_K)
        Returns:
            The attended context tensor.
        """
        scores = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            scores = scores * scale
        weights = F.softmax(scores, dim=-1)
        return torch.matmul(weights, V)
class Multi_Head_Attention(nn.Module):
    """Multi-head self-attention with a residual connection, dropout,
    and layer normalization."""

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        assert dim_model % num_head == 0
        self.num_head = num_head
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        """Apply multi-head self-attention to x, then residual + LN."""
        batch_size = x.size(0)

        def heads(t):
            # Split a projection into (batch * num_head, -1, dim_head).
            return t.view(batch_size * self.num_head, -1, self.dim_head)

        Q = heads(self.fc_Q(x))
        K = heads(self.fc_K(x))
        V = heads(self.fc_V(x))
        scale = K.size(-1) ** -0.5  # 1 / sqrt(dim_head)
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.dropout(self.fc(context))
        # Residual connection followed by layer normalization.
        return self.layer_norm(out + x)
def get_inputs():
    """Example forward input: one random (4, 4) activation tensor."""
    sample = torch.rand([4, 4])
    return [sample]


def get_init_inputs():
    """Example constructor arguments: no positionals, dim_model=4, num_head=4."""
    init_kwargs = {'dim_model': 4, 'num_head': 4}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place softmax over rows of length 1 (the attention-score buffer is
    # (16, 1, 1) at the call site): max-subtraction and sum-normalisation
    # both cancel, so every finite element becomes exp(0)/exp(0) == 1.0.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1.0  # compile-time folded scale factor
    tmp2 = tmp0 * tmp1
    tmp3 = tmp2 - tmp2  # subtract row max (the element itself) -> 0
    tmp4 = tmp3 * tmp1
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5 / tmp5  # normalise by row sum (again the element itself) -> 1
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics pass for the fused residual add: for each of the
    # 16 outputs it reads a length-4 row from both inputs (note the
    # transposed addressing of in_ptr1 via x0), adds them elementwise, and
    # writes the per-row mean to out_ptr0 and the biased variance to out_ptr1.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Residual sums for the four row elements.
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    # Mean over the normalised dimension (length 4).
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    # Biased variance (divide by N, not N-1).
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm application pass: recomputes the residual sum
    # (in_ptr0 + in_ptr1), normalises it with the mean (in_ptr2) and biased
    # variance (in_ptr3) produced by the statistics kernel, then applies the
    # affine weight (in_ptr4) and bias (in_ptr5).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex % 16
    x4 = xindex // 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
        )
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05  # LayerNorm eps
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10  # * gamma
    tmp13 = tmp11 + tmp12  # + beta
    tl.store(out_ptr0 + x5, tmp13, xmask)
def call(args):
    """Inductor-generated forward for Multi_Head_AttentionNew.

    Runs the Q/K/V projections as addmm, a (degenerate, length-1-row)
    softmax, the attention bmm pair, the output projection, and the fused
    residual-add + LayerNorm. Returns the normalised output followed by the
    tensors saved for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q, K, V projections: bias + x @ weight^T via addmm.
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
            primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
            primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
            primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        # Attention scores: batched matmul over (batch*heads, 1, 1) views.
        buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1),
            0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3)
        buf4 = buf3
        del buf3
        get_raw_stream(0)
        # Softmax over length-1 rows; see the kernel, output is all ones.
        triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1,
            1), 0), out=buf5)
        # Output projection of the attended context.
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4,
            1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha
            =1, beta=1, out=buf6)
        del primals_9
        # Fused residual add + LayerNorm (statistics pass, then apply pass).
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1,
            buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1,
            buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf7
        del buf8
        del primals_11
    return buf9, primals_1, primals_10, buf4, reinterpret_tensor(buf5, (4,
        4), (4, 1), 0), buf6, primals_8, reinterpret_tensor(buf2, (16, 1, 1
        ), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0
        ), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0)
class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention """

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Apply scaled dot-product attention: softmax(Q K^T [* scale]) V.

        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: optional factor applied to the logits (paper: 1/sqrt(dim_K));
                a falsy value (None or 0) disables scaling.
        Return:
            the attended context tensor
        """
        attn_logits = Q.matmul(K.transpose(1, 2))
        if scale:
            attn_logits = attn_logits * scale
        attn = F.softmax(attn_logits, dim=-1)
        context = attn.matmul(V)
        return context
class Multi_Head_AttentionNew(nn.Module):
    """Triton-backed multi-head attention: same parameters as
    Multi_Head_Attention, forward dispatched to the generated ``call``."""

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_AttentionNew, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        proj_dim = num_head * self.dim_head
        self.fc_Q = nn.Linear(dim_model, proj_dim)
        self.fc_K = nn.Linear(dim_model, proj_dim)
        self.fc_V = nn.Linear(dim_model, proj_dim)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(proj_dim, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, input_0):
        # Argument order is fixed by the generated kernel: primals_1..11.
        kernel_args = [self.fc_Q.weight, self.fc_K.weight, self.fc_Q.bias,
            self.fc_V.weight, self.fc_K.bias, self.fc.weight,
            self.fc_V.bias, input_0, self.fc.bias,
            self.layer_norm.weight, self.layer_norm.bias]
        return call(kernel_args)[0]
|
NTDXYG/Text-Classify-based-pytorch
|
Multi_Head_Attention
| false
| 8,593
|
[
"Apache-2.0"
] | 20
|
b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f
|
https://github.com/NTDXYG/Text-Classify-based-pytorch/tree/b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f
|
Mul
|
import torch
class Mul(torch.nn.Module):
    """Scale the input tensor by a fixed weight."""

    def __init__(self, weight):
        super(Mul, self).__init__()
        # Plain attribute (not a Parameter): the weight is not trained.
        self.weight = weight

    def forward(self, x):
        scaled = x * self.weight
        return scaled
def get_inputs():
    """Example forward input: one random (4, 4, 4, 4) tensor."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]


def get_init_inputs():
    """Example constructor arguments: no positionals, weight=4."""
    return [[], {'weight': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise multiply by the constant 4.0 (the module's ``weight``
    # baked in at compile time) over all 256 elements of the flat input.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
    """Inductor-generated forward for MulNew: allocates the output buffer
    and launches the constant-multiply kernel on a (4, 4, 4, 4) input."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class MulNew(torch.nn.Module):
    """Triton-backed version of Mul.

    NOTE: the generated kernel hard-codes the factor 4.0, so the stored
    ``weight`` attribute is not consulted in forward().
    """

    def __init__(self, weight):
        super(MulNew, self).__init__()
        self.weight = weight

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
NehzUx/autodl
|
Mul
| false
| 8,594
|
[
"Apache-2.0"
] | 25
|
c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
|
https://github.com/NehzUx/autodl/tree/c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
|
DeepSVDDLoss
|
import torch
from functools import reduce
import torch.nn as nn
class BaseModule(nn.Module):
    """
    Implements the basic module.
    All other modules inherit from this one
    """

    def load_w(self, checkpoint_path, device=None):
        """
        Loads a checkpoint into the state_dict.

        :param checkpoint_path: the checkpoint file to be loaded.
        :param device: optional map_location; defaults to 'cuda:1' to
            preserve the historical behavior.
        """
        if device is None:
            device = torch.device('cuda:' + '1')
        self.load_state_dict(torch.load(checkpoint_path, map_location=device))

    def __repr__(self):
        """
        String representation
        """
        good_old = super(BaseModule, self).__repr__()
        addition = 'Total number of parameters: {:,}'.format(self.n_parameters)
        return good_old + '\n' + addition

    def __call__(self, *args, **kwargs):
        return super(BaseModule, self).__call__(*args, **kwargs)

    @property
    def n_parameters(self):
        """
        Number of parameters of the model.

        Masked parameters contribute the sum of their mask; all others
        contribute their plain element count.
        """
        n_parameters = 0
        for p in self.parameters():
            if hasattr(p, 'mask'):
                n_parameters += torch.sum(p.mask).item()
            else:
                # BUG FIX: the original called reduce(mul, p.shape) but
                # ``mul`` was never imported, raising NameError at runtime.
                # numel() is exactly the product of the shape dimensions.
                n_parameters += p.numel()
        return int(n_parameters)
class DeepSVDDLoss(BaseModule):
    """
    Implements the reconstruction loss.
    """

    def __init__(self, c, R, nu, objective):
        """
        Class constructor.

        :param c: centre the distances are measured from.
        :param R: radius used by the soft-boundary objective.
        :param nu: trade-off hyperparameter for the soft-boundary objective.
        :param objective: 'soft-boundary' selects the hinge formulation;
            any other value yields the plain mean squared distance.
        """
        super(DeepSVDDLoss, self).__init__()
        self.c = c
        self.R = R
        self.nu = nu
        self.objective = objective

    def forward(self, x):
        """
        Forward propagation.

        :param x: the batch of input samples.
        :return: the scalar loss (averaged along the batch axis).
        """
        # Squared distance of every sample to the centre c, summed over dim 1.
        dist = ((x - self.c) ** 2).sum(dim=1)
        if self.objective != 'soft-boundary':
            return torch.mean(dist)
        scores = dist - self.R ** 2
        # Only samples falling outside the sphere contribute to the penalty.
        hinge = torch.mean(torch.max(torch.zeros_like(scores), scores))
        return self.R ** 2 + 1 / self.nu * hinge
def get_inputs():
    """Example forward input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Example constructor arguments for DeepSVDDLoss."""
    init_kwargs = {'c': 4, 'R': 4, 'nu': 4, 'objective': 4}
    return [[], init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from functools import reduce
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    # Fused DeepSVDD mean-distance loss with centre c = 4.0 baked in:
    # sums (x - 4)^2 over the 4 channel slices (spaced 16 apart), then
    # averages the 64 per-position sums into a single scalar.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp8 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp12 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp1 = 4.0  # the centre c, folded in at compile time
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 - tmp1
    tmp6 = tmp5 * tmp5
    tmp7 = tmp3 + tmp6
    tmp9 = tmp8 - tmp1
    tmp10 = tmp9 * tmp9
    tmp11 = tmp7 + tmp10
    tmp13 = tmp12 - tmp1
    tmp14 = tmp13 * tmp13
    tmp15 = tmp11 + tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tmp19 = 64.0
    tmp20 = tmp18 / tmp19  # mean over the 64 spatial positions
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
    """Inductor-generated forward for DeepSVDDLossNew: launches the fused
    (x - c)^2 sum/mean reduction and returns the scalar loss buffer."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_pow_sub_sum_0[grid(1)](buf1, arg0_1, 1, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,
class BaseModule(nn.Module):
    """
    Implements the basic module.
    All other modules inherit from this one
    """

    def load_w(self, checkpoint_path, device=None):
        """
        Loads a checkpoint into the state_dict.

        :param checkpoint_path: the checkpoint file to be loaded.
        :param device: optional map_location; defaults to 'cuda:1' to
            preserve the historical behavior.
        """
        if device is None:
            device = torch.device('cuda:' + '1')
        self.load_state_dict(torch.load(checkpoint_path, map_location=device))

    def __repr__(self):
        """
        String representation
        """
        good_old = super(BaseModule, self).__repr__()
        addition = 'Total number of parameters: {:,}'.format(self.n_parameters)
        return good_old + '\n' + addition

    def __call__(self, *args, **kwargs):
        return super(BaseModule, self).__call__(*args, **kwargs)

    @property
    def n_parameters(self):
        """
        Number of parameters of the model.

        Masked parameters contribute the sum of their mask; all others
        contribute their plain element count.
        """
        n_parameters = 0
        for p in self.parameters():
            if hasattr(p, 'mask'):
                n_parameters += torch.sum(p.mask).item()
            else:
                # BUG FIX: the original called reduce(mul, p.shape) but
                # ``mul`` was never imported, raising NameError at runtime.
                # numel() is exactly the product of the shape dimensions.
                n_parameters += p.numel()
        return int(n_parameters)
class DeepSVDDLossNew(BaseModule):
    """
    Implements the reconstruction loss.
    """

    def __init__(self, c, R, nu, objective):
        """
        Class constructor.
        """
        super(DeepSVDDLossNew, self).__init__()
        self.c = c
        self.R = R
        self.nu = nu
        self.objective = objective

    def forward(self, input_0):
        # Delegate to the Triton-generated kernel; note the kernel bakes in
        # c = 4.0 and the plain mean-distance objective.
        result = call([input_0])
        return result[0]
|
NjuHaoZhang/AutoregressModel-AE_VAD_CVPR2019
|
DeepSVDDLoss
| false
| 8,595
|
[
"MIT"
] | 12
|
b9843f34ecb59f908d78ddf977ee4670e0ed6cb4
|
https://github.com/NjuHaoZhang/AutoregressModel-AE_VAD_CVPR2019/tree/b9843f34ecb59f908d78ddf977ee4670e0ed6cb4
|
FFNLayer
|
import math
import torch
import torch.nn as nn
def gelu(x):
    """Gaussian Error Linear Unit, computed exactly via the error function."""
    erf_term = 1.0 + torch.erf(x / math.sqrt(2.0))
    return x * 0.5 * erf_term
class FFNLayer(nn.Module):
    """Feed-forward block: dropout -> linear -> GELU [-> LayerNorm] -> linear."""

    def __init__(self, input_dim, intermediate_dim, output_dim, dropout,
        layer_norm=True):
        super(FFNLayer, self).__init__()
        self.fc1 = nn.Linear(input_dim, intermediate_dim)
        # LayerNorm is optional; ``None`` disables it in forward().
        self.ln = nn.LayerNorm(intermediate_dim) if layer_norm else None
        self.dropout_func = nn.Dropout(dropout)
        self.fc2 = nn.Linear(intermediate_dim, output_dim)

    def forward(self, input):
        hidden = self.fc1(self.dropout_func(input))
        hidden = gelu(hidden)
        if self.ln:
            hidden = self.ln(hidden)
        return self.fc2(hidden)
def get_inputs():
    """Example forward input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Example constructor arguments for FFNLayer."""
    init_kwargs = {'input_dim': 4, 'intermediate_dim': 4, 'output_dim': 4,
        'dropout': 0.5}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mul_native_layer_norm_0(in_ptr0, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused GELU + LayerNorm statistics: for each length-4 row, computes
    # gelu(v) = v * 0.5 * (1 + erf(v / sqrt(2))) for all four elements,
    # then writes the row mean (out_ptr0) and biased variance (out_ptr1).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865475  # 1 / sqrt(2)
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7  # gelu of element 0
    tmp10 = tmp9 * tmp1
    tmp11 = tmp9 * tmp3
    tmp12 = libdevice.erf(tmp11)
    tmp13 = tmp12 + tmp6
    tmp14 = tmp10 * tmp13  # gelu of element 1
    tmp15 = tmp8 + tmp14
    tmp17 = tmp16 * tmp1
    tmp18 = tmp16 * tmp3
    tmp19 = libdevice.erf(tmp18)
    tmp20 = tmp19 + tmp6
    tmp21 = tmp17 * tmp20  # gelu of element 2
    tmp22 = tmp15 + tmp21
    tmp24 = tmp23 * tmp1
    tmp25 = tmp23 * tmp3
    tmp26 = libdevice.erf(tmp25)
    tmp27 = tmp26 + tmp6
    tmp28 = tmp24 * tmp27  # gelu of element 3
    tmp29 = tmp22 + tmp28
    tmp30 = 4.0
    tmp31 = tmp29 / tmp30  # row mean of the activated values
    tmp32 = tmp8 - tmp31
    tmp33 = tmp32 * tmp32
    tmp34 = tmp14 - tmp31
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp21 - tmp31
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp28 - tmp31
    tmp41 = tmp40 * tmp40
    tmp42 = tmp39 + tmp41
    tmp43 = tmp42 / tmp30  # biased variance
    tl.store(out_ptr0 + x0, tmp31, xmask)
    tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_div_erf_mul_native_layer_norm_1(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused GELU + LayerNorm application: recomputes gelu(v) per element,
    # normalises with the row mean (in_ptr1) and variance (in_ptr2) from the
    # statistics kernel, then applies gamma (in_ptr3) and beta (in_ptr4).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865475  # 1 / sqrt(2)
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7  # gelu(v)
    tmp10 = tmp8 - tmp9
    tmp12 = 1e-05  # LayerNorm eps
    tmp13 = tmp11 + tmp12
    tmp14 = libdevice.rsqrt(tmp13)
    tmp15 = tmp10 * tmp14
    tmp17 = tmp15 * tmp16  # * gamma
    tmp19 = tmp17 + tmp18  # + beta
    tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
    """Inductor-generated forward for FFNLayerNew.

    Runs fc1 as addmm over the flattened (64, 4) input, the fused
    GELU + LayerNorm kernels, then fc2 as addmm, returning the reshaped
    output plus the tensors saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # fc1: bias + x @ weight^T over the flattened input.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        # GELU + LayerNorm statistics, then normalisation + affine.
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_erf_mul_native_layer_norm_0[grid(64)](buf0,
            buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_erf_mul_native_layer_norm_1[grid(256)](buf0,
            buf1, buf2, primals_4, primals_5, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf1
        del buf2
        del primals_5
        # fc2 projection of the normalised activations.
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
        ), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6
def gelu(x):
    """GELU activation (exact erf form): x times the standard normal CDF at x."""
    return x * 0.5 * (torch.erf(x / math.sqrt(2.0)) + 1.0)
class FFNLayerNew(nn.Module):
    """Triton-backed FFN layer: same parameters as FFNLayer, forward
    dispatched to the generated ``call`` entry point."""

    def __init__(self, input_dim, intermediate_dim, output_dim, dropout,
        layer_norm=True):
        super(FFNLayerNew, self).__init__()
        self.fc1 = nn.Linear(input_dim, intermediate_dim)
        # LayerNorm is optional; ``None`` disables it.
        self.ln = nn.LayerNorm(intermediate_dim) if layer_norm else None
        self.dropout_func = nn.Dropout(dropout)
        self.fc2 = nn.Linear(intermediate_dim, output_dim)

    def forward(self, input_0):
        # Argument order is fixed by the generated kernel: primals_1..7.
        kernel_args = [input_0, self.fc1.weight, self.fc1.bias,
            self.ln.weight, self.ln.bias, self.fc2.weight, self.fc2.bias]
        return call(kernel_args)[0]
|
NExTplusplus/tat-qa
|
FFNLayer
| false
| 8,596
|
[
"MIT"
] | 23
|
4ce5d8e637b80143de0d2492ecd4b861d6ba9a89
|
https://github.com/NExTplusplus/tat-qa/tree/4ce5d8e637b80143de0d2492ecd4b861d6ba9a89
|
MessagePassing
|
import torch
import torch._C
import torch.serialization
from torch import nn
from torch.nn import Parameter
def make_onehot_kernel(kernel_size, index):
    """
    Make 2D one hot square kernel, i.e. h=w
    k[kernel_size, kernel_size] = 0 except k.view(-1)[index] = 1
    """
    flat = torch.zeros(kernel_size * kernel_size)
    flat[index] = 1
    return flat.view(1, 1, kernel_size, kernel_size)
def make_spatial_kernel(kernel_size, bandwidth, isreshape=True):
    """
    Make 2D square smoothness kernel, i.e. h=w
    k = 1/bandwidth * exp(-(pj-pi)**2/(2*bandwidth**2))
    pj, pi = location of pixel; the centre tap is zeroed out.
    """
    assert bandwidth > 0, 'bandwidth of kernel must be > 0'
    assert kernel_size % 2 != 0, 'kernel must be odd'
    half = (kernel_size - 1) // 2
    # Coordinate grids: X varies along columns, Y along rows.
    X = torch.linspace(-half, half, steps=kernel_size).expand(kernel_size,
        kernel_size)
    Y = X.clone().t()
    sq_dist = X ** 2 + Y ** 2
    kernel = torch.exp(-sq_dist / (2 * bandwidth ** 2))
    kernel[half, half] = 0  # exclude the centre pixel itself
    if not isreshape:
        return kernel
    return kernel.view(1, 1, kernel_size, kernel_size)
class GaussianMask(nn.Module):
    """
    Break down Gaussian kernel (2nd part of appearance kernel) into CNN
    kj = (I(j) - I(i))**2/2*bandwidth**2, j#i
    but compute all maps instead of 1 kernel
    """

    def __init__(self, in_channels, kernel_size, bandwidth, iskernel=True):
        super(GaussianMask, self).__init__()
        assert bandwidth > 0, 'bandwidth of kernel must be > 0'
        assert kernel_size % 2 != 0, 'kernel must be odd'
        self.bandwidth = bandwidth
        self.iskernel = iskernel
        # One shifted copy per neighbour position; the centre is excluded.
        self.n_kernels = kernel_size ** 2 - 1
        weight = self._make_kernel_weight(in_channels, kernel_size,
            self.n_kernels)
        self.conv = nn.Conv2d(in_channels, in_channels * self.n_kernels,
            kernel_size, stride=1, padding=kernel_size // 2,
            groups=in_channels, bias=False)
        # Fixed, non-trainable gather weights.
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight.view_as(self.conv.weight))

    def _make_kernel_weight(self, in_channels, kernel_size, n_kernels):
        weight = torch.zeros(in_channels, n_kernels, kernel_size, kernel_size)
        for i in range(n_kernels):
            # Skip the centre tap: flat indices at or past the middle
            # shift forward by one.
            index = i if i < n_kernels // 2 else i + 1
            weight[:, i, :] = make_onehot_kernel(kernel_size, index)
        return weight

    def forward(self, X):
        batch_size, in_channels, H, W = X.shape
        # Xj gathers every neighbour value per pixel via the one-hot convs.
        Xj = self.conv(X).view(batch_size, in_channels, self.n_kernels, H, W)
        if not self.iskernel:
            return Xj
        Xi = X.unsqueeze(dim=2)
        energy = (Xj - Xi) ** 2 / (2 * self.bandwidth ** 2)
        return torch.exp(-energy)
class SpatialFilter(nn.Module):
    """
    Break down spatial filter (smoothest kernel) into CNN blocks
    refer: https://arxiv.org/pdf/1210.5644.pdf
    """

    def __init__(self, n_classes, kernel_size, theta_gamma):
        super(SpatialFilter, self).__init__()
        weight = make_spatial_kernel(kernel_size, theta_gamma)
        # Depthwise conv holding the fixed Gaussian smoothness kernel.
        self.conv = nn.Conv2d(n_classes, n_classes, kernel_size, stride=1,
            padding=kernel_size // 2, groups=n_classes, bias=False)
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight)

    def forward(self, Q):
        smoothed = self.conv(Q)
        # Normalise by the filter response on an all-ones map so that
        # border pixels (with truncated support) are not attenuated.
        norm_weight = self.conv(Q.new_ones(*Q.shape, requires_grad=False))
        return smoothed / norm_weight
class BilateralFilter(nn.Module):
    """
    Break down bilateral filter (appearance kernel) into CNN blocks
    remember that exp(-a-b) =exp(-a)*exp(b)
    """

    def __init__(self, in_channels, n_classes, kernel_size, theta_alpha,
        theta_beta):
        super(BilateralFilter, self).__init__()
        spatial = make_spatial_kernel(kernel_size, theta_alpha,
            isreshape=False)
        # Keep only the non-centre taps, shaped for broadcasting over
        # (batch, channel, class, tap, H, W).
        self.spatial_weight = Parameter(spatial[spatial > 0].view(1, 1, 1,
            -1, 1, 1), requires_grad=False)
        self.gauss_mask_I = GaussianMask(in_channels, kernel_size, theta_beta)
        self.guass_mask_Q = GaussianMask(n_classes, kernel_size, 1,
            iskernel=False)

    def forward(self, Q, I):
        Ij = self.gauss_mask_I(I)
        Qj = self.guass_mask_Q(Q)
        # Combine appearance (Ij) and label (Qj) terms with the spatial
        # weights, then sum over the neighbour-tap axis.
        Qj = Ij.unsqueeze(dim=2) * Qj.unsqueeze(dim=1) * self.spatial_weight
        Qtilde = Qj.sum(dim=3)
        norm_weight = (Ij * self.spatial_weight.squeeze(dim=2)).sum(dim=2)
        return Qtilde / norm_weight.unsqueeze(dim=2)
class MessagePassing(nn.Module):
    """
    Combine bilateral filter (appearance filter)
    and spatial filter to make message passing
    """

    def __init__(self, in_channels, n_classes, kernel_size=[3], theta_alpha
        =[2.0], theta_beta=[2.0], theta_gamma=[2.0]):
        super(MessagePassing, self).__init__()
        assert len(theta_alpha) == len(theta_beta
            ), 'theta_alpha and theta_beta have different lengths'
        self.n_bilaterals = len(theta_alpha)
        self.n_spatials = len(theta_gamma)
        # Register one bilateral filter per (alpha, beta) pair and one
        # spatial filter per gamma value.
        for i in range(self.n_bilaterals):
            self.add_module('bilateral{}'.format(i), BilateralFilter(
                in_channels, n_classes, kernel_size[i], theta_alpha[i],
                theta_beta[i]))
        for i in range(self.n_spatials):
            self.add_module('spatial{}'.format(i), SpatialFilter(n_classes,
                kernel_size[i], theta_gamma[i]))

    def _get_child(self, child_name):
        return getattr(self, child_name)

    def forward(self, Q, I):
        # Bilateral outputs first, then spatial outputs (each given a
        # singleton channel axis), concatenated along dim 1.
        filtered = [self._get_child('bilateral{}'.format(i))(Q, I)
            for i in range(self.n_bilaterals)]
        filtered += [self._get_child('spatial{}'.format(i))(Q).unsqueeze(dim=1)
            for i in range(self.n_spatials)]
        return torch.cat(filtered, dim=1)
def get_inputs():
    """Example forward inputs: Q and I, both random (4, 4, 4, 4) tensors."""
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Example constructor arguments for MessagePassing."""
    return [[], {'in_channels': 4, 'n_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch._C
import torch.serialization
from torch import nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout-change copy: reads the contiguous (4, 4, 4, 4) input and writes
    # it in a channels-last-style layout (channel index innermost) expected
    # by the subsequent external grouped convolution.
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4
    y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
    tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, out_ptr1, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Same layout-change copy as kernel 0, but the reordered tensor is
    # needed twice downstream, so identical data is written to two outputs.
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4
    y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
    tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
    tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    # Bilateral-filter numerator: for each output position, reduces over the
    # 8 neighbour taps of exp(-(Ij - Ii)^2 * 0.125) * Qj * spatial_weight.
    # NOTE(review): 0.125 presumably equals 1/(2*theta_beta**2) for the
    # default theta_beta=2 — confirm against the Python module.
    xnumel = 1024
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r4 = rindex
    x1 = xindex // 4 % 16
    x2 = xindex // 64 % 4
    x3 = xindex // 256
    x5 = xindex // 4
    x6 = xindex % 64
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + (r4 + 8 * x2 + 32 * x1 + 512 * x3), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + (r4 + 8 * x6 + 512 * x3), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.load(in_ptr3 + r4, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1  # Ij - Ii
    tmp3 = tmp2 * tmp2
    tmp4 = 0.125
    tmp5 = tmp3 * tmp4
    tmp6 = -tmp5
    tmp7 = tl_math.exp(tmp6)  # Gaussian appearance term
    tmp9 = tmp7 * tmp8  # * Qj
    tmp11 = tmp9 * tmp10  # * spatial weight
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.where(xmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]  # sum over the 8 taps
    tl.store(out_ptr0 + x7, tmp15, xmask)
@triton.jit
def triton_per_fused_div_exp_mul_neg_pow_sub_sum_3(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Bilateral-filter normaliser: same Gaussian appearance term as the
    # numerator kernel, but multiplied only by the spatial weight (no Qj),
    # reduced over the 8 neighbour taps.
    xnumel = 256
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r3 = rindex
    x0 = xindex % 16
    x1 = xindex // 16 % 4
    x2 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r3 + 8 * x1 + 32 * x0 + 512 * x2), xmask,
        other=0.0)
    tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1  # Ij - Ii
    tmp3 = tmp2 * tmp2
    tmp4 = 0.125
    tmp5 = tmp3 * tmp4
    tmp6 = -tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 * tmp8  # * spatial weight
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.where(xmask, tmp10, 0)
    tmp13 = tl.sum(tmp12, 1)[:, None]
    tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_new_ones_4(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fills a 256-element buffer with 1.0 — materialises the
    # Q.new_ones(*Q.shape) tensor used by the spatial-filter normalisation.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 1.0
    tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Divides the bilateral numerator by its normaliser and writes the
    # result directly into an interleaved (stride-5 innermost) slot of the
    # concatenation buffer, avoiding a separate torch.cat copy.
    ynumel = 16
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex % 16
    x3 = xindex // 16
    y4 = yindex
    x5 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x3 + 4 * x2 + 64 * y4), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 16 * y4), xmask & ymask, eviction_policy
        ='evict_last')
    tmp2 = tmp0 / tmp1
    tl.store(out_ptr0 + (y0 + 5 * x5 + 320 * y1), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Spatial-filter branch of the concatenation: divides the smoothed map
    # by the ones-response normaliser and scatters the quotient into the
    # remaining stride-5 slot of the shared concat buffer.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tmp0 / tmp1
    tl.store(out_ptr0 + (5 * x1 + 80 * x0 + 320 * x2), tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Layout conversion: copy the strided (4,5,4,4,4)/(320,1,80,20,5)
    concat buffer into a contiguous (320,64,16,4,1) output tensor."""
    ynumel = 20
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 5
    y1 = yindex // 5
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 5 * x2 + 320 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 64 * y3), tmp0, xmask & ymask)
def call(args):
    """Inductor-generated CUDA graph for one message-passing step: a
    BilateralFilter (appearance kernel) plus a SpatialFilter (smoothness
    kernel), concatenated into one (4, 5, 4, 4, 4) output.

    args: (Q-like input, gauss_mask_I conv weight, I-like input,
    guass_mask_Q conv weight, spatial_weight, spatial conv weight).
    Buffer reuse (buf0->buf2->buf5/buf8, etc.) is deliberate; do not
    reorder the del statements.
    """
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(arg4_1, (1, 1, 1, 8, 1, 1), (8, 8, 8, 1, 1, 1))
    assert_size_stride(arg5_1, (4, 1, 3, 3), (9, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
        get_raw_stream(0)
        # repack first input into channels-last layout for the grouped conv
        triton_poi_fused_convolution_0[grid(16, 16)](arg0_1, buf0, 16, 16,
            XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        # grouped one-hot conv: gather the 8 neighbour values per channel
        buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf1, (4, 32, 4, 4), (512, 1, 128, 32))
        del arg1_1
        buf2 = buf0
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
        triton_poi_fused_convolution_1[grid(16, 16)](arg2_1, buf2, buf6, 16,
            16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del arg2_1
        buf3 = extern_kernels.convolution(buf2, arg3_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf3, (4, 32, 4, 4), (512, 1, 128, 32))
        del arg3_1
        # bilateral message numerator: sum over neighbour taps
        buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 1, 16, 4),
            torch.float32)
        triton_per_fused_mul_sum_2[grid(1024)](buf1, arg0_1, buf3, arg4_1,
            buf4, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf3
        # bilateral normaliser (denominator), reusing buf2's storage
        buf5 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        triton_per_fused_div_exp_mul_neg_pow_sub_sum_3[grid(256)](buf1,
            arg0_1, arg4_1, buf5, 256, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del arg0_1
        del arg4_1
        del buf1
        # spatial filter: conv(Q) ...
        buf7 = extern_kernels.convolution(buf6, arg5_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 1, 16, 4))
        buf8 = buf6
        del buf6
        # ... and conv(ones) for border normalisation
        triton_poi_fused_new_ones_4[grid(256)](buf8, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf9 = extern_kernels.convolution(buf8, arg5_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf9, (4, 4, 4, 4), (64, 1, 16, 4))
        del arg5_1
        del buf8
        # concat buffer: bilateral result in channels 0-3, spatial in 4
        buf12 = empty_strided_cuda((4, 5, 4, 4, 4), (320, 1, 80, 20, 5),
            torch.float32)
        buf10 = reinterpret_tensor(buf12, (4, 4, 4, 4, 4), (320, 1, 80, 20,
            5), 0)
        triton_poi_fused_div_5[grid(16, 64)](buf4, buf5, buf10, 16, 64,
            XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del buf4
        del buf5
        buf11 = reinterpret_tensor(buf12, (4, 1, 4, 4, 4), (320, 1, 80, 20,
            5), 4)
        triton_poi_fused_cat_6[grid(256)](buf7, buf9, buf11, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del buf7
        del buf9
        # final copy to a contiguous layout
        buf13 = empty_strided_cuda((4, 5, 4, 4, 4), (320, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_7[grid(20, 64)](buf12, buf13, 20, 64, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf10
        del buf11
        del buf12
        return buf13,
def make_onehot_kernel(kernel_size, index):
    """Build a (1, 1, kernel_size, kernel_size) kernel that is all zeros
    except for a single 1 at flat position ``index``."""
    flat = torch.zeros(kernel_size * kernel_size)
    flat[index] = 1
    return flat.view(1, 1, kernel_size, kernel_size)
def make_spatial_kernel(kernel_size, bandwidth, isreshape=True):
    """Build a square 2-D Gaussian smoothness kernel,
    k[i, j] = exp(-(di**2 + dj**2) / (2 * bandwidth**2)), with the centre
    tap zeroed out (a pixel does not message itself).

    Returns shape (1, 1, k, k) when ``isreshape`` else (k, k).
    """
    assert bandwidth > 0, 'bandwidth of kernel must be > 0'
    assert kernel_size % 2 != 0, 'kernel must be odd'
    half = (kernel_size - 1) // 2
    coords = torch.linspace(-half, half, steps=kernel_size)
    # squared distance along one axis, broadcast to a full grid
    sq = coords.expand(kernel_size, kernel_size) ** 2
    kernel = torch.exp(-(sq + sq.t()) / (2 * bandwidth ** 2))
    kernel[half, half] = 0  # no self-connection
    if isreshape:
        return kernel.view(1, 1, kernel_size, kernel_size)
    return kernel
class GaussianMask(nn.Module):
    """Compute per-neighbour Gaussian appearance terms as a grouped CNN:
    for every non-centre position j of a k x k window,
    exp(-(I(j) - I(i))**2 / (2 * bandwidth**2)).

    With ``iskernel=False`` it only gathers the neighbour values
    (no difference / exponential).
    """

    def __init__(self, in_channels, kernel_size, bandwidth, iskernel=True):
        super(GaussianMask, self).__init__()
        assert bandwidth > 0, 'bandwidth of kernel must be > 0'
        assert kernel_size % 2 != 0, 'kernel must be odd'
        self.bandwidth = bandwidth
        self.iskernel = iskernel
        # one output map per non-centre window position
        self.n_kernels = kernel_size ** 2 - 1
        weight = self._make_kernel_weight(in_channels, kernel_size,
            self.n_kernels)
        self.conv = nn.Conv2d(in_channels, in_channels * self.n_kernels,
            kernel_size, stride=1, padding=kernel_size // 2,
            groups=in_channels, bias=False)
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight.view_as(self.conv.weight))

    def _make_kernel_weight(self, in_channels, kernel_size, n_kernels):
        # One one-hot kernel per neighbour; positions at/after the centre
        # shift by one so the centre tap is skipped.
        weight = torch.zeros(in_channels, n_kernels, kernel_size, kernel_size)
        centre = n_kernels // 2
        for i in range(n_kernels):
            position = i if i < centre else i + 1
            weight[:, i, :] = make_onehot_kernel(kernel_size, position)
        return weight

    def forward(self, X):
        batch_size, in_channels, H, W = X.shape
        # neighbour values I(j), one map per window position
        Xj = self.conv(X).view(batch_size, in_channels, self.n_kernels, H, W)
        if not self.iskernel:
            return Xj
        diff = (Xj - X.unsqueeze(dim=2)) ** 2 / (2 * self.bandwidth ** 2)
        return torch.exp(-diff)
class SpatialFilter(nn.Module):
    """Smoothness (spatial) kernel of dense-CRF message passing as a frozen
    grouped convolution; see https://arxiv.org/pdf/1210.5644.pdf."""

    def __init__(self, n_classes, kernel_size, theta_gamma):
        super(SpatialFilter, self).__init__()
        weight = make_spatial_kernel(kernel_size, theta_gamma)
        self.conv = nn.Conv2d(n_classes, n_classes, kernel_size, stride=1,
            padding=kernel_size // 2, groups=n_classes, bias=False)
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight)

    def forward(self, Q):
        # Normalise by the response to an all-ones map so border pixels
        # (with a truncated support) are not attenuated.
        numer = self.conv(Q)
        denom = self.conv(Q.new_ones(*Q.shape, requires_grad=False))
        return numer / denom
class BilateralFilter(nn.Module):
    """Appearance (bilateral) kernel of dense-CRF message passing, broken
    into CNN blocks; uses exp(-a - b) == exp(-a) * exp(-b) to combine the
    spatial and intensity Gaussians."""

    def __init__(self, in_channels, n_classes, kernel_size, theta_alpha,
        theta_beta):
        super(BilateralFilter, self).__init__()
        spatial = make_spatial_kernel(kernel_size, theta_alpha,
            isreshape=False)
        # keep only the non-centre taps, shaped to broadcast over
        # (batch, class, channel, tap, H, W)
        self.spatial_weight = Parameter(spatial[spatial > 0].view(1, 1, 1,
            -1, 1, 1), requires_grad=False)
        self.gauss_mask_I = GaussianMask(in_channels, kernel_size, theta_beta)
        self.guass_mask_Q = GaussianMask(n_classes, kernel_size, 1,
            iskernel=False)

    def forward(self, Q, I):
        Ij = self.gauss_mask_I(I)  # appearance terms per neighbour
        Qj = self.guass_mask_Q(Q)  # neighbour Q values (no kernel applied)
        weighted = Ij.unsqueeze(dim=2) * Qj.unsqueeze(dim=1)
        weighted = weighted * self.spatial_weight
        Qtilde = weighted.sum(dim=3)
        # per-pixel normaliser over the neighbour taps
        norm_weight = (Ij * self.spatial_weight.squeeze(dim=2)).sum(dim=2)
        return Qtilde / norm_weight.unsqueeze(dim=2)
class MessagePassingNew(nn.Module):
    """Combine bilateral (appearance) and spatial (smoothness) filters into
    one message-passing step, dispatching to the compiled ``call`` graph."""

    def __init__(self, in_channels, n_classes, kernel_size=[3], theta_alpha
        =[2.0], theta_beta=[2.0], theta_gamma=[2.0]):
        super(MessagePassingNew, self).__init__()
        assert len(theta_alpha) == len(theta_beta
            ), 'theta_alpha and theta_beta have different lengths'
        self.n_bilaterals = len(theta_alpha)
        self.n_spatials = len(theta_gamma)
        for idx, (alpha, beta) in enumerate(zip(theta_alpha, theta_beta)):
            self.add_module('bilateral{}'.format(idx), BilateralFilter(
                in_channels, n_classes, kernel_size[idx], alpha, beta))
        for idx, gamma in enumerate(theta_gamma):
            self.add_module('spatial{}'.format(idx), SpatialFilter(
                n_classes, kernel_size[idx], gamma))

    def _get_child(self, child_name):
        # modules registered via add_module are reachable as attributes
        return getattr(self, child_name)

    def forward(self, input_0, input_1):
        # Gather the frozen filter weights of the first filter pair and run
        # the compiled graph; it returns a single concatenated tensor.
        bilateral = self.bilateral0
        arg4_1 = bilateral.spatial_weight
        arg1_1 = bilateral.gauss_mask_I.conv.weight
        arg3_1 = bilateral.guass_mask_Q.conv.weight
        arg5_1 = self.spatial0.conv.weight
        output = call([input_0, arg1_1, input_1, arg3_1, arg4_1, arg5_1])
        return output[0]
|
Molly6/segmentation_shengteng2021
|
MessagePassing
| false
| 8,597
|
[
"Apache-2.0"
] | 21
|
33dfefa80193586f504069793d9e141944549e99
|
https://github.com/Molly6/segmentation_shengteng2021/tree/33dfefa80193586f504069793d9e141944549e99
|
MlpWithAttention
|
import torch
import torch.nn as nn
class Self_Attn1D(nn.Module):
    """Self-attention layer over a (B, C) input treated as length-1
    sequences; returns the attended features and the attention map."""

    def __init__(self, in_dim, activation, k=8):
        super(Self_Attn1D, self).__init__()
        self.chanel_in = in_dim
        self.activation = activation
        reduced = in_dim // k
        self.query_conv = nn.Conv1d(in_channels=in_dim, out_channels=
            reduced, kernel_size=1)
        self.key_conv = nn.Conv1d(in_channels=in_dim, out_channels=reduced,
            kernel_size=1)
        self.value_conv = nn.Conv1d(in_channels=in_dim, out_channels=
            in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, return_attn=False):
        """x: (B, C) feature maps.  Returns (gamma * attention(x) + x,
        attention); attention is (B, 1, 1) since T == 1 here."""
        B, C = x.size()
        T = 1
        x = x.view(B, C, T)
        q = self.query_conv(x).view(B, -1, T).permute(0, 2, 1)
        k = self.key_conv(x).view(B, -1, T)
        attention = self.softmax(torch.bmm(q, k))
        v = self.value_conv(x).view(B, -1, T)
        out = torch.bmm(v, attention.permute(0, 2, 1)).view(B, C, T)
        # gamma starts at 0, so initially the layer is the identity
        out = (self.gamma * out + x).squeeze(2)
        return out, attention
class MlpWithAttention(nn.Module):
    """MLP with two interleaved 1-D self-attention blocks; every linear
    layer is followed by LeakyReLU."""

    def __init__(self, in_dim, out_dim):
        super(MlpWithAttention, self).__init__()
        hidden = max(8, in_dim * 2)
        self.input = nn.Linear(in_dim, hidden)
        self.output = nn.Linear(hidden, out_dim)
        self.fc = nn.Linear(hidden, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, hidden)
        self.attention = Self_Attn1D(hidden, nn.LeakyReLU)
        self.attention2 = Self_Attn1D(hidden, nn.LeakyReLU)
        self.relu = nn.LeakyReLU()

    def forward(self, x):
        h = self.relu(self.input(x.float()))
        h, _ = self.attention(h)  # attention maps are discarded
        h = self.relu(self.fc(h))
        h, _ = self.attention2(h)
        h = self.relu(self.fc2(h))
        h = self.relu(self.fc3(h))
        return self.relu(self.output(h))
def get_inputs():
    """Sample forward inputs: a single random (4, 4) tensor."""
    sample = torch.rand([4, 4])
    return [sample]
def get_init_inputs():
    """Constructor args/kwargs for MlpWithAttention."""
    init_kwargs = {'in_dim': 4, 'out_dim': 4}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """Fused bias add + LeakyReLU(0.01) over a (4, 8) activation.

    out_ptr0 receives the boolean ``input > 0`` mask (saved in the backward
    state returned by call()); out_ptr1 the activated values.
    """
    xnumel = 32  # 4 rows x 8 features
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 8  # feature index -> bias element
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # matmul output + bias
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01  # nn.LeakyReLU default negative slope
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    """In-place add of a single-element bias to a 4-element conv output
    (the 1-output-channel query/key Conv1d of Self_Attn1D)."""
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)  # scalar bias
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Softmax over a singleton dim: exp(x - x) / exp(x - x) collapses to
    1.0 for every element, so this writes all-ones in place (T == 1 in
    Self_Attn1D, making the attention map trivial)."""
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tmp0 - tmp0  # x - max(x) with a single element == 0
    tmp2 = tl_math.exp(tmp1)
    tmp3 = tmp2 / tmp2  # normalise by the (single-term) sum
    tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    """In-place per-channel bias add for the 8-channel value-conv output
    of Self_Attn1D (4 rows x 8 channels)."""
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 8  # channel index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """out = gamma * attn_out + x -- the residual scaling step of
    Self_Attn1D. in_ptr0 is the 1-element gamma parameter, broadcast over
    all 32 elements."""
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)  # scalar gamma
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + x0, xmask)  # attention output
    tmp4 = tl.load(in_ptr2 + x0, xmask)  # residual input x
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 + tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """Fused bias add + LeakyReLU(0.01) for the final (4, 4) output layer;
    same structure as triton_poi_fused_leaky_relu_0 but 4 features wide.
    out_ptr0 holds the boolean positive mask, out_ptr1 the activations."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index -> bias element
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
    """Inductor-generated CUDA graph for MlpWithAttention.forward:
    input Linear -> attention -> fc -> attention2 -> fc2 -> fc3 -> output,
    each Linear fused with bias + LeakyReLU. Returns the activation plus
    every intermediate needed for backward. Buffer reuse and del ordering
    are deliberate; do not reorder.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (8, 4), (4, 1))
    assert_size_stride(primals_3, (8,), (1,))
    assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (8, 8, 1), (8, 1, 1))
    assert_size_stride(primals_9, (8,), (1,))
    assert_size_stride(primals_10, (1,), (1,))
    assert_size_stride(primals_11, (8, 8), (8, 1))
    assert_size_stride(primals_12, (8,), (1,))
    assert_size_stride(primals_13, (1, 8, 1), (8, 1, 1))
    assert_size_stride(primals_14, (1,), (1,))
    assert_size_stride(primals_15, (1, 8, 1), (8, 1, 1))
    assert_size_stride(primals_16, (1,), (1,))
    assert_size_stride(primals_17, (8, 8, 1), (8, 1, 1))
    assert_size_stride(primals_18, (8,), (1,))
    assert_size_stride(primals_19, (1,), (1,))
    assert_size_stride(primals_20, (8, 8), (8, 1))
    assert_size_stride(primals_21, (8,), (1,))
    assert_size_stride(primals_22, (8, 8), (8, 1))
    assert_size_stride(primals_23, (8,), (1,))
    assert_size_stride(primals_24, (4, 8), (8, 1))
    assert_size_stride(primals_25, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- input Linear + LeakyReLU ---
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 8),
            (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(32)](buf0, primals_3, buf1, buf2,
            32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_3
        # --- first Self_Attn1D: query / key 1x1 convs ---
        buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 1
            ), (8, 1, 0), 0), primals_4, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf3, (4, 1, 1), (1, 1, 1))
        buf4 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 1
            ), (8, 1, 0), 0), primals_6, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf4, (4, 1, 1), (1, 1, 1))
        buf5 = buf3
        del buf3
        triton_poi_fused_convolution_1[grid(4)](buf5, primals_5, 4, XBLOCK=
            4, num_warps=1, num_stages=1)
        del primals_5
        buf6 = reinterpret_tensor(buf4, (4, 1, 1), (1, 4, 4), 0)
        del buf4
        triton_poi_fused_convolution_1[grid(4)](buf6, primals_7, 4, XBLOCK=
            4, num_warps=1, num_stages=1)
        del primals_7
        # energy = bmm(q, k); softmax is trivial (T == 1)
        buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 1), (1, 0, 0), 0
            ), buf6, out=buf7)
        buf8 = buf7
        del buf7
        triton_poi_fused__softmax_2[grid(4)](buf8, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        # value conv + bias, then attention-weighted sum and residual
        buf9 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 1
            ), (8, 1, 0), 0), primals_8, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf9, (4, 8, 1), (8, 1, 1))
        buf10 = reinterpret_tensor(buf9, (4, 8, 1), (8, 1, 32), 0)
        del buf9
        triton_poi_fused_convolution_3[grid(32)](buf10, primals_9, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_9
        buf11 = reinterpret_tensor(buf0, (4, 8, 1), (8, 1, 1), 0)
        del buf0
        extern_kernels.bmm(buf10, buf8, out=buf11)
        buf12 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32)
        triton_poi_fused_add_mul_4[grid(32)](primals_10, buf11, buf2, buf12,
            32, XBLOCK=32, num_warps=1, num_stages=1)
        # --- fc Linear + LeakyReLU ---
        buf13 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf12, (4, 8), (8, 1), 0),
            reinterpret_tensor(primals_11, (8, 8), (1, 8), 0), out=buf13)
        buf14 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
        buf15 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_leaky_relu_0[grid(32)](buf13, primals_12, buf14,
            buf15, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_12
        # --- second Self_Attn1D (same structure as the first) ---
        buf16 = extern_kernels.convolution(reinterpret_tensor(buf15, (4, 8,
            1), (8, 1, 0), 0), primals_13, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf16, (4, 1, 1), (1, 1, 1))
        buf17 = extern_kernels.convolution(reinterpret_tensor(buf15, (4, 8,
            1), (8, 1, 0), 0), primals_15, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf17, (4, 1, 1), (1, 1, 1))
        buf18 = buf16
        del buf16
        triton_poi_fused_convolution_1[grid(4)](buf18, primals_14, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_14
        buf19 = reinterpret_tensor(buf17, (4, 1, 1), (1, 4, 4), 0)
        del buf17
        triton_poi_fused_convolution_1[grid(4)](buf19, primals_16, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_16
        buf20 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf18, (4, 1, 1), (1, 0, 0),
            0), buf19, out=buf20)
        buf21 = buf20
        del buf20
        triton_poi_fused__softmax_2[grid(4)](buf21, 4, XBLOCK=4, num_warps=
            1, num_stages=1)
        buf22 = extern_kernels.convolution(reinterpret_tensor(buf15, (4, 8,
            1), (8, 1, 0), 0), primals_17, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf22, (4, 8, 1), (8, 1, 1))
        buf23 = reinterpret_tensor(buf22, (4, 8, 1), (8, 1, 32), 0)
        del buf22
        triton_poi_fused_convolution_3[grid(32)](buf23, primals_18, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_18
        buf24 = reinterpret_tensor(buf13, (4, 8, 1), (8, 1, 1), 0)
        del buf13
        extern_kernels.bmm(buf23, buf21, out=buf24)
        buf25 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32)
        triton_poi_fused_add_mul_4[grid(32)](primals_19, buf24, buf15,
            buf25, 32, XBLOCK=32, num_warps=1, num_stages=1)
        # --- fc2, fc3, output Linears, each with LeakyReLU ---
        buf26 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf25, (4, 8), (8, 1), 0),
            reinterpret_tensor(primals_20, (8, 8), (1, 8), 0), out=buf26)
        buf27 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
        buf28 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_leaky_relu_0[grid(32)](buf26, primals_21, buf27,
            buf28, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_21
        buf29 = buf26
        del buf26
        extern_kernels.mm(buf28, reinterpret_tensor(primals_22, (8, 8), (1,
            8), 0), out=buf29)
        buf30 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
        buf31 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_leaky_relu_0[grid(32)](buf29, primals_23, buf30,
            buf31, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del buf29
        del primals_23
        buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf31, reinterpret_tensor(primals_24, (8, 4), (1,
            8), 0), out=buf32)
        buf33 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_leaky_relu_5[grid(16)](buf32, primals_25, buf33,
            buf34, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf32
        del primals_25
    # buf34 is the forward result; the rest is saved state for backward
    return (buf34, primals_1, primals_4, primals_6, primals_8, primals_10,
        primals_13, primals_15, primals_17, primals_19, buf1,
        reinterpret_tensor(buf2, (4, 8, 1), (8, 1, 1), 0), buf8, buf11,
        reinterpret_tensor(buf12, (4, 8), (8, 1), 0), buf14,
        reinterpret_tensor(buf15, (4, 8, 1), (8, 1, 1), 0), buf21, buf24,
        reinterpret_tensor(buf25, (4, 8), (8, 1), 0), buf27, buf28, buf30,
        buf31, buf33, primals_24, primals_22, primals_20,
        reinterpret_tensor(buf23, (4, 1, 8), (8, 1, 1), 0), buf18,
        reinterpret_tensor(buf19, (4, 1, 1), (1, 1, 1), 0), primals_11,
        reinterpret_tensor(buf10, (4, 1, 8), (8, 1, 1), 0), buf5,
        reinterpret_tensor(buf6, (4, 1, 1), (1, 1, 1), 0))
class Self_Attn1D(nn.Module):
    """Self-attention layer over a (B, C) input treated as length-1
    sequences; returns the attended features and the attention map."""

    def __init__(self, in_dim, activation, k=8):
        super(Self_Attn1D, self).__init__()
        self.chanel_in = in_dim
        self.activation = activation
        reduced = in_dim // k
        self.query_conv = nn.Conv1d(in_channels=in_dim, out_channels=
            reduced, kernel_size=1)
        self.key_conv = nn.Conv1d(in_channels=in_dim, out_channels=reduced,
            kernel_size=1)
        self.value_conv = nn.Conv1d(in_channels=in_dim, out_channels=
            in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, return_attn=False):
        """x: (B, C) feature maps.  Returns (gamma * attention(x) + x,
        attention); attention is (B, 1, 1) since T == 1 here."""
        B, C = x.size()
        T = 1
        x = x.view(B, C, T)
        q = self.query_conv(x).view(B, -1, T).permute(0, 2, 1)
        k = self.key_conv(x).view(B, -1, T)
        attention = self.softmax(torch.bmm(q, k))
        v = self.value_conv(x).view(B, -1, T)
        out = torch.bmm(v, attention.permute(0, 2, 1)).view(B, C, T)
        # gamma starts at 0, so initially the layer is the identity
        out = (self.gamma * out + x).squeeze(2)
        return out, attention
class MlpWithAttentionNew(nn.Module):
    """MlpWithAttention variant whose forward dispatches to the compiled
    inductor graph ``call`` instead of running the submodules directly.

    NOTE(review): several primal names below appear cross-wired relative
    to their roles in call() (e.g. primals_10 feeds the gamma-scaling
    kernel but is mapped from key_conv.bias, while primals_9/primals_21
    mix fc biases with value-conv biases). This is generated code --
    verify the mapping against call() before editing either side.
    """

    def __init__(self, in_dim, out_dim):
        super(MlpWithAttentionNew, self).__init__()
        # hidden width: at least 8, otherwise twice the input width
        out = max(8, in_dim * 2)
        self.input = nn.Linear(in_dim, out)
        self.output = nn.Linear(out, out_dim)
        self.fc = nn.Linear(out, out)
        self.fc2 = nn.Linear(out, out)
        self.fc3 = nn.Linear(out, out)
        self.attention = Self_Attn1D(out, nn.LeakyReLU)
        self.attention2 = Self_Attn1D(out, nn.LeakyReLU)
        self.relu = nn.LeakyReLU()

    def forward(self, input_0):
        # Linear layer weights and biases
        primals_2 = self.input.weight
        primals_3 = self.input.bias
        primals_24 = self.output.weight
        primals_25 = self.output.bias
        primals_11 = self.fc.weight
        primals_9 = self.fc.bias
        primals_20 = self.fc2.weight
        primals_12 = self.fc2.bias
        primals_22 = self.fc3.weight
        primals_18 = self.fc3.bias
        # first attention block parameters
        primals_5 = self.attention.gamma
        primals_4 = self.attention.query_conv.weight
        primals_7 = self.attention.query_conv.bias
        primals_6 = self.attention.key_conv.weight
        primals_10 = self.attention.key_conv.bias
        primals_8 = self.attention.value_conv.weight
        primals_21 = self.attention.value_conv.bias
        # second attention block parameters
        primals_14 = self.attention2.gamma
        primals_13 = self.attention2.query_conv.weight
        primals_16 = self.attention2.query_conv.bias
        primals_15 = self.attention2.key_conv.weight
        primals_19 = self.attention2.key_conv.bias
        primals_17 = self.attention2.value_conv.weight
        primals_23 = self.attention2.value_conv.bias
        primals_1 = input_0
        # call() returns (output, *saved-for-backward); keep only the output
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25])
        return output[0]
|
Malta-Lab/IUPE
|
MlpWithAttention
| false
| 8,600
|
[
"MIT"
] | 10
|
44ddf119917538f02bb69509fec7a8314eed419f
|
https://github.com/Malta-Lab/IUPE/tree/44ddf119917538f02bb69509fec7a8314eed419f
|
IWEncoder
|
import torch
from torch import nn
class IWConv2d(nn.Module):
    """'Same'-padding convolution wrapper used by the WGAN-style encoder.

    Note: the ``stride`` argument is accepted but the convolution always
    uses stride 1 (kept as-is for behavioural parity with callers).
    """

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
        stride=1, bias=True):
        super(IWConv2d, self).__init__()
        self.he_init = he_init
        # 'same' padding for odd kernel sizes
        self.padding = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
            padding=self.padding, bias=bias)

    def forward(self, input):
        return self.conv(input)
class ConvMeanPool(nn.Module):
    """Convolution followed by 2x2 mean pooling (average of the four
    phase-shifted stride-2 slices)."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
        super(ConvMeanPool, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
            self.he_init)

    def forward(self, input):
        out = self.conv(input)
        # keep the same summation order as a strided mean pool
        top_left = out[:, :, ::2, ::2]
        bottom_left = out[:, :, 1::2, ::2]
        top_right = out[:, :, ::2, 1::2]
        bottom_right = out[:, :, 1::2, 1::2]
        return (top_left + bottom_left + top_right + bottom_right) / 4
class MeanPoolConv(nn.Module):
    """2x2 mean pooling (average of the four phase-shifted stride-2
    slices) followed by a convolution."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
        super(MeanPoolConv, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
            self.he_init)

    def forward(self, input):
        # keep the same summation order as a strided mean pool
        top_left = input[:, :, ::2, ::2]
        bottom_left = input[:, :, 1::2, ::2]
        top_right = input[:, :, ::2, 1::2]
        bottom_right = input[:, :, 1::2, 1::2]
        pooled = (top_left + bottom_left + top_right + bottom_right) / 4
        return self.conv(pooled)
class DepthToSpace(nn.Module):
    """Rearrange a (B, C*r*r, H, W) tensor into (B, C, H*r, W*r), moving
    depth groups into r x r spatial blocks (a pixel-shuffle variant with
    its own channel ordering -- preserved exactly)."""

    def __init__(self, block_size):
        super(DepthToSpace, self).__init__()
        self.block_size = block_size
        self.block_size_sq = block_size * block_size

    def forward(self, input):
        x = input.permute(0, 2, 3, 1)  # to channels-last
        batch, in_h, in_w, in_depth = x.size()
        out_depth = int(in_depth / self.block_size_sq)
        out_w = int(in_w * self.block_size)
        out_h = int(in_h * self.block_size)
        # expose the r*r sub-pixel axis, then fold it into width ...
        grouped = x.reshape(batch, in_h, in_w, self.block_size_sq, out_depth)
        rows = [part.reshape(batch, in_h, out_w, out_depth) for part in
            grouped.split(self.block_size, 3)]
        # ... and interleave the row groups into height
        x = torch.stack(rows, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
            ).reshape(batch, out_h, out_w, out_depth)
        return x.permute(0, 3, 1, 2)  # back to channels-first
class UpSampleConv(nn.Module):
    """2x upsample by channel replication + depth-to-space, followed by a
    'same' convolution."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
        bias=True):
        super(UpSampleConv, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
            self.he_init, bias=bias)
        self.depth_to_space = DepthToSpace(2)

    def forward(self, input):
        # replicate channels 4x, then rearrange depth into 2x2 blocks,
        # which duplicates each pixel into a 2x2 neighbourhood
        replicated = torch.cat((input, input, input, input), 1)
        upsampled = self.depth_to_space(replicated)
        return self.conv(upsampled)
class ResidualBlock(nn.Module):
    """Pre-activation residual block with optional 2x down/up resampling.

    resample='down' uses LayerNorm + mean-pool convs; 'up' uses BatchNorm +
    depth-to-space upsample convs; None keeps the resolution. ``hw`` is the
    input spatial size (needed only for the LayerNorm shapes).

    NOTE(review): for resample=None, bn1 is BatchNorm2d(output_dim) but is
    applied to the *input* (input_dim channels) in forward -- this only
    works when input_dim == output_dim; verify with callers.
    """

    def __init__(self, input_dim, output_dim, kernel_size, resample=None, hw=64
        ):
        super(ResidualBlock, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.kernel_size = kernel_size
        self.resample = resample
        self.bn1 = None
        self.bn2 = None
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        # normalisation choice depends on the resampling direction
        if resample == 'down':
            self.bn1 = nn.LayerNorm([input_dim, hw, hw])
            self.bn2 = nn.LayerNorm([input_dim, hw, hw])
        elif resample == 'up':
            self.bn1 = nn.BatchNorm2d(input_dim)
            self.bn2 = nn.BatchNorm2d(output_dim)
        elif resample is None:
            self.bn1 = nn.BatchNorm2d(output_dim)
            self.bn2 = nn.LayerNorm([input_dim, hw, hw])
        else:
            raise Exception('invalid resample value')
        # conv stack: shortcut projection + two main-path convs
        if resample == 'down':
            self.conv_shortcut = MeanPoolConv(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = ConvMeanPool(input_dim, output_dim, kernel_size=
                kernel_size)
        elif resample == 'up':
            self.conv_shortcut = UpSampleConv(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = UpSampleConv(input_dim, output_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = IWConv2d(output_dim, output_dim, kernel_size=
                kernel_size)
        elif resample is None:
            self.conv_shortcut = IWConv2d(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = IWConv2d(input_dim, output_dim, kernel_size=
                kernel_size)
        else:
            raise Exception('invalid resample value')

    def forward(self, input):
        # identity shortcut only when shapes are guaranteed to match
        if self.input_dim == self.output_dim and self.resample is None:
            shortcut = input
        else:
            shortcut = self.conv_shortcut(input)
        # pre-activation main path: norm -> relu -> conv, twice
        output = input
        output = self.bn1(output)
        output = self.relu1(output)
        output = self.conv_1(output)
        output = self.bn2(output)
        output = self.relu2(output)
        output = self.conv_2(output)
        return shortcut + output
class IWEncoder(nn.Module):
    """WGAN-style image encoder: a 'same' conv, four downsampling residual
    blocks (each halving the resolution), then a linear projection to a
    tanh-squashed z_dim latent."""

    def __init__(self, input_size=64, z_dim=128, n_image_channels=3):
        super(IWEncoder, self).__init__()
        self.size = input_size
        self.n_image_channels = n_image_channels
        self.ssize = self.size // 16  # spatial size after four 2x downsamples
        self.conv1 = IWConv2d(n_image_channels, self.size, 3, he_init=False)
        self.rb1 = ResidualBlock(self.size, 2 * self.size, 3, resample=
            'down', hw=self.size)
        self.rb2 = ResidualBlock(2 * self.size, 4 * self.size, 3, resample=
            'down', hw=int(self.size / 2))
        self.rb3 = ResidualBlock(4 * self.size, 8 * self.size, 3, resample=
            'down', hw=int(self.size / 4))
        self.rb4 = ResidualBlock(8 * self.size, 8 * self.size, 3, resample=
            'down', hw=int(self.size / 8))
        self.ln1 = nn.Linear(self.ssize * self.ssize * 8 * self.size, z_dim)

    def forward(self, input):
        h = input.contiguous().view(-1, self.n_image_channels, self.size,
            self.size)
        h = self.conv1(h)
        for block in (self.rb1, self.rb2, self.rb3, self.rb4):
            h = block(h)
        # flatten and project to the latent space
        h = self.ln1(h.view(-1, self.ssize * self.ssize * 8 * self.size))
        return torch.tanh(h)
def get_inputs():
    """Sample forward inputs: one random batch of four 3x64x64 images."""
    batch = torch.rand([4, 3, 64, 64])
    return [batch]
def get_init_inputs():
    """Constructor args/kwargs for IWEncoder (all defaults)."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Repack a 3-input-channel 3x3 conv weight: transpose the 9 spatial
    taps (x axis) against the channel dimension (y0) into a channels-last
    layout for the convolution that follows."""
    ynumel = 192  # 64 output filters x 3 input channels
    xnumel = 9  # 3x3 spatial taps
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3  # input channel
    y1 = yindex // 3  # output filter
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_view_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
# In-place bias add after an extern convolution: adds the per-channel bias
# in_ptr0[index % 64] to every element of in_out_ptr0.
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, None)
# Averages four strided loads (offsets 0, 64, 4096, 4160 relative to a
# 128-stride row pair) and scales by 0.25. NOTE(review): the offset pattern
# looks like a fused 2x2 mean-pool over a 64-channel, 64-wide feature map --
# confirm against the downsample path in the original module.
@triton.jit
def triton_poi_fused_add_div_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = xindex // 64 % 32
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x3, tmp8, None)
# Stage 1 of a split LayerNorm reduction: for each 128-element slice,
# computes the mean (out_ptr0), the sum of squared deviations (out_ptr1)
# and the element count as float (out_ptr2).
@triton.jit
def triton_per_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r3 = rindex
    x0 = xindex % 32
    x1 = xindex // 32 % 64
    x2 = xindex // 2048
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * (r3 % 64) + 4096 * ((r3 + 128 *
        x1) // 64 % 64) + 262144 * x2 + (r3 + 128 * x1) // 4096), None,
        eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)
    tl.store(out_ptr2 + x4, tmp7, None)
# Stage 2: Welford-combines 64 partial (mean, m2, count) triples per group
# into one triple, written to out_ptr0/out_ptr1/out_ptr2.
@triton.jit
def triton_per_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 32
    x1 = xindex // 32
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp15 = tmp12[:, None]
    tl.store(out_ptr0 + x3, tmp13, xmask)
    tl.store(out_ptr1 + x3, tmp14, xmask)
    tl.store(out_ptr2 + x3, tmp15, xmask)
# Final stage: combines the remaining 32 partials per sample, then derives
# rsqrt(var + 1e-5) over 262144 elements and writes it in place into
# in_out_ptr0 (mean goes to out_ptr0). The bare `tmp12[:, None]` discards
# the combined count, which is statically known here.
@triton.jit
def triton_per_fused_native_layer_norm_13(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 32 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 32 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp12[:, None]
    tmp16 = 262144.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
# Applies the normalization ((x - mean) * rsqrt), the elementwise affine
# (scale in_ptr3, shift in_ptr4) and a ReLU via maximum(0, .).
@triton.jit
def triton_poi_fused_native_layer_norm_relu_14(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 262144 * y1), ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x2 + 4096 * y0), ymask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr4 + (x2 + 4096 * y0), ymask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1, 1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp10, ymask)
# Fused residual join: bias-adds the shortcut (in_out_ptr0 + in_ptr0[c]),
# bias-adds four conv outputs from in_ptr1, averages them (* 0.25) and adds
# the result to the shortcut in place. NOTE(review): the 4-load pattern
# mirrors the 2x2 mean-pool kernels above -- confirm.
@triton.jit
def triton_poi_fused_add_convolution_div_15(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 128
    x1 = xindex // 128 % 32
    x2 = xindex // 4096
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x0 + 256 * x1 + 16384 * x2), None)
    tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (8192 + x0 + 256 * x1 + 16384 * x2), None)
    tmp9 = tl.load(in_ptr1 + (128 + x0 + 256 * x1 + 16384 * x2), None)
    tmp12 = tl.load(in_ptr1 + (8320 + x0 + 256 * x1 + 16384 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tmp6 + tmp4
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp4
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 + tmp4
    tmp14 = tmp11 + tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tmp2 + tmp16
    tl.store(in_out_ptr0 + x3, tmp17, None)
# 128-channel variant of the 4-load average (0.25 * sum) kernel.
@triton.jit
def triton_poi_fused_add_div_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = xindex // 128 % 16
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x3, tmp8, None)
# Stage 1 of the next LayerNorm reduction (131072 elements per sample):
# per-slice mean, sum of squared deviations, and count.
@triton.jit
def triton_per_fused_native_layer_norm_17(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r3 = rindex
    x0 = xindex % 16
    x1 = xindex // 16 % 64
    x2 = xindex // 1024
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (8 * x0 + 128 * (r3 % 32) + 4096 * ((r3 + 128 *
        x1) // 32 % 32) + 131072 * x2 + (r3 + 128 * x1) // 1024), None,
        eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)
    tl.store(out_ptr2 + x4, tmp7, None)
# Stage 2 Welford combine of 64 partial (mean, m2, count) triples per group.
@triton.jit
def triton_per_fused_native_layer_norm_18(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 16
    x1 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp15 = tmp12[:, None]
    tl.store(out_ptr0 + x3, tmp13, xmask)
    tl.store(out_ptr1 + x3, tmp14, xmask)
    tl.store(out_ptr2 + x3, tmp15, xmask)
# Final combine per sample; besides mean (out_ptr0) and m2 (out_ptr1) it
# also writes rsqrt(var + 1e-5) / 131072 to out_ptr2 for the backward pass
# (7.62939453125e-06 == 1/131072).
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_19(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp12[:, None]
    tmp16 = 131072.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    tmp21 = 7.62939453125e-06
    tmp22 = tmp20 * tmp21
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)
# Normalizes in place: (x - mean) * rsqrt(m2/131072 + 1e-5), with per-sample
# mean in in_ptr0 and m2 in in_ptr1 (sample index = x // 131072).
@triton.jit
def triton_poi_fused_native_layer_norm_20(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x1 = xindex // 131072
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 131072.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tl.store(in_out_ptr0 + x2, tmp9, None)
# Applies the LayerNorm affine (weight in_ptr1, bias in_ptr2) to an
# already-normalized input, then ReLU. Input here is pre-normalized, so
# there is no mean/rsqrt subtraction in this variant.
@triton.jit
def triton_poi_fused_native_layer_norm_relu_21(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 512
    xnumel = 1024
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 131072 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + (y0 + 128 * x2 + 131072 * y1), tmp6, xmask & ymask)
# Final-stage Welford combine over 16 partials per sample; writes mean to
# out_ptr0 and rsqrt(var + 1e-5) in place into in_out_ptr0 (131072 elems).
@triton.jit
def triton_per_fused_native_layer_norm_22(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp12[:, None]
    tmp16 = 131072.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
# Full normalize + affine + ReLU: (x - mean) * rsqrt * weight + bias,
# clamped at 0 (128 channels, 1024 spatial positions).
@triton.jit
def triton_poi_fused_native_layer_norm_relu_23(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 512
    xnumel = 1024
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 131072 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1, 1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tl.store(out_ptr0 + (y0 + 128 * x2 + 131072 * y1), tmp10, xmask & ymask)
# 256-channel variant of the fused residual join: shortcut bias-add plus
# 0.25 * sum of four bias-added conv outputs, stored in place.
@triton.jit
def triton_poi_fused_add_convolution_div_24(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 256
    x1 = xindex // 256 % 16
    x2 = xindex // 4096
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x0 + 512 * x1 + 16384 * x2), None)
    tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (8192 + x0 + 512 * x1 + 16384 * x2), None)
    tmp9 = tl.load(in_ptr1 + (256 + x0 + 512 * x1 + 16384 * x2), None)
    tmp12 = tl.load(in_ptr1 + (8448 + x0 + 512 * x1 + 16384 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tmp6 + tmp4
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp4
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 + tmp4
    tmp14 = tmp11 + tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tmp2 + tmp16
    tl.store(in_out_ptr0 + x3, tmp17, None)
# 256-channel variant of the 4-load average (0.25 * sum) kernel.
@triton.jit
def triton_poi_fused_add_div_25(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 256
    x1 = xindex // 256 % 8
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x3, tmp8, None)
# Stage 1 of the 65536-element LayerNorm reduction: per-slice mean, sum of
# squared deviations, and count.
@triton.jit
def triton_per_fused_native_layer_norm_26(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r3 = rindex
    x0 = xindex % 8
    x1 = xindex // 8 % 64
    x2 = xindex // 512
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (32 * x0 + 256 * (r3 % 16) + 4096 * ((r3 + 128 *
        x1) // 16 % 16) + 65536 * x2 + (r3 + 128 * x1) // 256), None,
        eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)
    tl.store(out_ptr2 + x4, tmp7, None)
# Stage 2 Welford combine of 64 partial (mean, m2, count) triples per group.
@triton.jit
def triton_per_fused_native_layer_norm_27(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 8
    x1 = xindex // 8
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp15 = tmp12[:, None]
    tl.store(out_ptr0 + x3, tmp13, xmask)
    tl.store(out_ptr1 + x3, tmp14, xmask)
    tl.store(out_ptr2 + x3, tmp15, xmask)
# Final combine per sample over 65536 elements; also writes rsqrt * 1/65536
# (1.52587890625e-05) to out_ptr2 for the backward pass.
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_28(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp12[:, None]
    tmp16 = 65536.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    tmp21 = 1.52587890625e-05
    tmp22 = tmp20 * tmp21
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)
# Normalizes in place: (x - mean) * rsqrt(m2/65536 + 1e-5), sample index
# given by x // 65536.
@triton.jit
def triton_poi_fused_native_layer_norm_29(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x1 = xindex // 65536
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 65536.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tl.store(in_out_ptr0 + x2, tmp9, None)
# Affine (weight in_ptr1, bias in_ptr2) + ReLU on pre-normalized input,
# 256 channels x 256 spatial positions.
@triton.jit
def triton_poi_fused_native_layer_norm_relu_30(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + (y0 + 256 * x2 + 65536 * y1), tmp6, xmask)
# Final-stage Welford combine (8 partials per sample); writes mean to
# out_ptr0 and rsqrt(var + 1e-5) in place into in_out_ptr0 (65536 elems).
@triton.jit
def triton_per_fused_native_layer_norm_31(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp12[:, None]
    tmp16 = 65536.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
# Full normalize + affine + ReLU: (x - mean) * rsqrt * weight + bias,
# clamped at 0 (256 channels, 256 spatial positions).
@triton.jit
def triton_poi_fused_native_layer_norm_relu_32(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr4 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1, 1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tl.store(out_ptr0 + (y0 + 256 * x2 + 65536 * y1), tmp10, xmask)
# 512-channel variant of the fused residual join: shortcut bias-add plus
# 0.25 * sum of four bias-added conv outputs, stored in place.
@triton.jit
def triton_poi_fused_add_convolution_div_33(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 512
    x1 = xindex // 512 % 8
    x2 = xindex // 4096
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 16384 * x2), None)
    tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (8192 + x0 + 1024 * x1 + 16384 * x2), None)
    tmp9 = tl.load(in_ptr1 + (512 + x0 + 1024 * x1 + 16384 * x2), None)
    tmp12 = tl.load(in_ptr1 + (8704 + x0 + 1024 * x1 + 16384 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tmp6 + tmp4
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp4
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 + tmp4
    tmp14 = tmp11 + tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tmp2 + tmp16
    tl.store(in_out_ptr0 + x3, tmp17, None)
# 512-channel variant of the 4-load average (0.25 * sum) kernel.
@triton.jit
def triton_poi_fused_add_div_34(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 512
    x1 = xindex // 512 % 4
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x3, tmp8, None)
# Stage 1 of the 32768-element LayerNorm reduction: per-slice mean, sum of
# squared deviations, and count (xmask active since xnumel = 1024).
@triton.jit
def triton_per_fused_native_layer_norm_35(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    RBLOCK: tl.constexpr = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 256
    x1 = xindex // 256
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * (r2 % 64) + 32768 * x1 + r2 //
        64), xmask, eviction_policy='evict_last', other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 128, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp9 / tmp9
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp10, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
    tl.store(out_ptr2 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused_native_layer_norm_36(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_37(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    """Final LayerNorm reduction stage (Inductor-generated).

    Merges the last 4 partial Welford triples per sample into the final
    mean (out_ptr0) and M2 sum (out_ptr1), and also precomputes a scaled
    inverse-stddev term for the backward pass (out_ptr2).
    """
    xnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    # Dead expression kept by codegen: combined weight is unused here.
    tmp12[:, None]
    # Variance = M2 / N, with N = 32768 normalized elements per sample.
    tmp16 = 32768.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    # Scale rstd by 3.0517578125e-05 (== 1/32768) for the backward formula.
    tmp21 = 3.0517578125e-05
    tmp22 = tmp20 * tmp21
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_38(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """In-place LayerNorm whitening (Inductor-generated).

    For every element, subtract its group's mean (in_ptr0) and multiply by
    rsqrt(M2 / 32768 + 1e-05), where groups are consecutive runs of 32768
    elements (one normalized sample each).
    """
    block_start = tl.program_id(0) * XBLOCK
    offs = block_start + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    # One (mean, m2) pair per 32768-element sample.
    group = offs // 32768
    val = tl.load(in_out_ptr0 + offs, None)
    mean = tl.load(in_ptr0 + group, None, eviction_policy='evict_last')
    m2 = tl.load(in_ptr1 + group, None, eviction_policy='evict_last')
    inv_std = libdevice.rsqrt(m2 / 32768.0 + 1e-05)
    tl.store(in_out_ptr0 + offs, (val - mean) * inv_std, None)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_39(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    """Elementwise LayerNorm affine tail fused with ReLU (Inductor-generated).

    Input in_ptr0 is already whitened; this applies the per-position affine
    parameters weight (in_ptr1) and bias (in_ptr2), then clamps at zero.
    Uses a 2-D (y, x) tiling over a channels-last layout.
    """
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    # Activation in channels-last order; affine params indexed per (x, y0).
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    # ReLU via max(0, x).
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask)
@triton.jit
def triton_per_fused_native_layer_norm_40(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Final LayerNorm reduction stage (Inductor-generated).

    Merges 4 partial Welford triples per sample and writes the final mean
    (out_ptr0) and rsqrt(var + eps) in place into in_out_ptr0.
    """
    xnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    # Dead expression kept by codegen: combined weight is unused here.
    tmp12[:, None]
    # Variance = M2 / 32768 elements per normalized sample.
    tmp16 = 32768.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_41(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    """Full fused LayerNorm + ReLU tail (Inductor-generated).

    Computes ((x - mean) * rstd) * weight + bias, then ReLU.
    mean (in_ptr1) and rstd (in_ptr2) are per-sample scalars (indexed by
    y1); weight (in_ptr3) and bias (in_ptr4) are per-position.
    """
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr4 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    # ReLU via max(0, x).
    tmp9 = tl.full([1, 1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tl.store(out_ptr0 + (y0 + 512 * x2 + 32768 * y1), tmp10, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_42(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
    constexpr):
    """Residual-block tail: shortcut + mean-pooled conv branch (generated).

    out = (shortcut + shortcut_bias)
        + mean of the four 2x2-subsampled phases of (conv_out + conv_bias).
    The four loads at offsets 0 / 512 / 4096 / 4608 pick the (0,0), (0,1),
    (1,0), (1,1) phases of the 2x2 pooling window; *0.25 is the average.
    Also transposes channels-last activations into a contiguous layout.
    """
    ynumel = 64
    xnumel = 512
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x3 = xindex
    y4 = yindex
    y0 = yindex % 4
    y5 = yindex // 4
    y2 = yindex // 16
    y6 = yindex % 16
    tmp0 = tl.load(in_ptr0 + (x3 + 512 * y4), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x3 + 1024 * y0 + 8192 * y5), xmask & ymask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + (4096 + x3 + 1024 * y0 + 8192 * y5), xmask &
        ymask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (512 + x3 + 1024 * y0 + 8192 * y5), xmask &
        ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + (4608 + x3 + 1024 * y0 + 8192 * y5), xmask &
        ymask, eviction_policy='evict_last')
    # Shortcut path: conv output + its bias.
    tmp2 = tmp0 + tmp1
    # Main path: add conv bias to each phase, sum the four phases.
    tmp5 = tmp3 + tmp4
    tmp7 = tmp6 + tmp4
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp4
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 + tmp4
    tmp14 = tmp11 + tmp13
    # Divide by 4 (mean pool) and add to the shortcut.
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tmp2 + tmp16
    tl.store(out_ptr0 + (y6 + 16 * x3 + 8192 * y2), tmp17, xmask & ymask)
@triton.jit
def triton_poi_fused_tanh_43(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Fused bias-add + tanh, in place (Inductor-generated).

    in_out_ptr0 holds the (4, 128) linear output; in_ptr0 is the 128-wide
    bias, broadcast over rows.
    """
    xnumel = 512
    block_start = tl.program_id(0) * XBLOCK
    offs = block_start + tl.arange(0, XBLOCK)[:]
    mask = offs < xnumel
    # Bias index wraps every 128 elements (one bias per output feature).
    feat = offs % 128
    acc = tl.load(in_out_ptr0 + offs, mask)
    bias = tl.load(in_ptr0 + feat, mask, eviction_policy='evict_last')
    tl.store(in_out_ptr0 + offs, libdevice.tanh(acc + bias), mask)
def call(args):
    """Inductor-generated forward pass for IWEncoderNew.

    Consumes the flat list of 41 tensors (input image + all module
    parameters), runs the stem conv, four down-sampling residual blocks
    (each: LayerNorm -> ReLU -> conv -> LayerNorm -> ReLU -> conv-mean-pool,
    plus a mean-pool-conv shortcut), then the final linear + tanh head.
    Returns the (4, 128) embedding followed by the saved intermediates
    needed for backward. NOTE: clears ``args`` as a side effect.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25, primals_26, primals_27,
        primals_28, primals_29, primals_30, primals_31, primals_32,
        primals_33, primals_34, primals_35, primals_36, primals_37,
        primals_38, primals_39, primals_40, primals_41) = args
    args.clear()
    # Shape/stride contracts for every input tensor.
    assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (128, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_7, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_9, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_10, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_11, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_12, (128,), (1,))
    assert_size_stride(primals_13, (256, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_14, (256,), (1,))
    assert_size_stride(primals_15, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_16, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_17, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_18, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_19, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_21, (256,), (1,))
    assert_size_stride(primals_22, (512, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_23, (512,), (1,))
    assert_size_stride(primals_24, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_25, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_27, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_28, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_29, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_30, (512,), (1,))
    assert_size_stride(primals_31, (512, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_32, (512,), (1,))
    assert_size_stride(primals_33, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_34, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_35, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_36, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_37, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_38, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_39, (512,), (1,))
    assert_size_stride(primals_40, (128, 8192), (8192, 1))
    assert_size_stride(primals_41, (128,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Repack 3x3 conv weights into channels-last-friendly layouts.
        buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(192, 9)](primals_2, buf0, 192, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
            float32)
        triton_poi_fused_1[grid(4096, 9)](primals_8, buf1, 4096, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
            .float32)
        triton_poi_fused_2[grid(8192, 9)](primals_11, buf2, 8192, 9, XBLOCK
            =16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_11
        buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_3[grid(16384, 9)](primals_17, buf3, 16384, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_17
        buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_4[grid(32768, 9)](primals_20, buf4, 32768, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_20
        buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_5[grid(65536, 9)](primals_26, buf5, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_26
        buf6 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_6[grid(131072, 9)](primals_29, buf6, 131072, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_29
        buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_7[grid(262144, 9)](primals_35, buf7, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_35
        buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_7[grid(262144, 9)](primals_38, buf8, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_38
        # Input image to channels-last, then the stem conv (conv1) + bias.
        buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
            .float32)
        triton_poi_fused_view_8[grid(12, 4096)](primals_1, buf9, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_1
        buf10 = extern_kernels.convolution(buf9, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_9[grid(1048576)](buf11, primals_3,
            1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_3
        # rb1 shortcut: mean-pool then 1x1 conv.
        buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.float32)
        triton_poi_fused_add_div_10[grid(262144)](buf11, buf12, 262144,
            XBLOCK=512, num_warps=8, num_stages=1)
        buf13 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 128, 32, 32), (131072, 1, 4096, 128))
        # rb1 bn1: three-stage Welford LayerNorm over (64, 64, 64).
        buf14 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192,
            8192, 1, 32), torch.float32)
        buf15 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192,
            8192, 1, 32), torch.float32)
        buf16 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192,
            8192, 1, 32), torch.float32)
        triton_per_fused_native_layer_norm_11[grid(8192)](buf11, buf14,
            buf15, buf16, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf17 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1),
            torch.float32)
        buf18 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1),
            torch.float32)
        buf19 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_12[grid(128)](buf14, buf15,
            buf16, buf17, buf18, buf19, 128, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        buf20 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf21 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf23 = reinterpret_tensor(buf21, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf21
        triton_per_fused_native_layer_norm_13[grid(4)](buf23, buf17, buf18,
            buf19, buf20, 4, 32, XBLOCK=1, num_warps=2, num_stages=1)
        # rb1: LayerNorm affine + ReLU, then conv_1.
        buf24 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_14[grid(256, 4096)](buf11,
            buf20, buf23, primals_6, primals_7, buf24, 256, 4096, XBLOCK=32,
            YBLOCK=32, num_warps=4, num_stages=1)
        del primals_7
        buf25 = extern_kernels.convolution(buf24, buf1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf25, (4, 64, 64, 64), (262144, 1, 4096, 64))
        # rb1 bn2: same three-stage LayerNorm, reusing the stat buffers.
        buf26 = buf16
        del buf16
        buf27 = buf15
        del buf15
        buf28 = buf14
        del buf14
        triton_per_fused_native_layer_norm_11[grid(8192)](buf25, buf26,
            buf27, buf28, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf29 = buf19
        del buf19
        buf30 = buf18
        del buf18
        buf31 = buf17
        del buf17
        triton_per_fused_native_layer_norm_12[grid(128)](buf26, buf27,
            buf28, buf29, buf30, buf31, 128, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del buf26
        del buf27
        del buf28
        buf32 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf33 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf35 = reinterpret_tensor(buf33, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf33
        triton_per_fused_native_layer_norm_13[grid(4)](buf35, buf29, buf30,
            buf31, buf32, 4, 32, XBLOCK=1, num_warps=2, num_stages=1)
        del buf29
        del buf30
        del buf31
        buf36 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_14[grid(256, 4096)](buf25,
            buf32, buf35, primals_9, primals_10, buf36, 256, 4096, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_10
        # rb1 conv_2 then fused shortcut-add + mean-pool.
        buf37 = extern_kernels.convolution(buf36, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 128, 64, 64), (524288, 1, 8192, 128))
        buf38 = buf13
        del buf13
        triton_poi_fused_add_convolution_div_15[grid(524288)](buf38,
            primals_5, buf37, primals_12, 524288, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf37
        del primals_12
        del primals_5
        # rb2: same pattern at 32x32 / 128 channels.
        buf39 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
            torch.float32)
        triton_poi_fused_add_div_16[grid(131072)](buf38, buf39, 131072,
            XBLOCK=512, num_warps=8, num_stages=1)
        buf40 = extern_kernels.convolution(buf39, primals_13, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf40, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf41 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096,
            4096, 1, 16), torch.float32)
        buf42 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096,
            4096, 1, 16), torch.float32)
        buf43 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096,
            4096, 1, 16), torch.float32)
        triton_per_fused_native_layer_norm_17[grid(4096)](buf38, buf41,
            buf42, buf43, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf44 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1),
            torch.float32)
        buf45 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1),
            torch.float32)
        buf46 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_18[grid(64)](buf41, buf42, buf43,
            buf44, buf45, buf46, 64, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf47 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf48 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf124 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        triton_per_fused_native_layer_norm_native_layer_norm_backward_19[grid
            (4)](buf44, buf45, buf46, buf47, buf48, buf124, 4, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf50 = buf38
        del buf38
        triton_poi_fused_native_layer_norm_20[grid(524288)](buf50, buf47,
            buf48, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        buf51 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_21[grid(512, 1024)](buf50,
            primals_15, primals_16, buf51, 512, 1024, XBLOCK=32, YBLOCK=32,
            num_warps=4, num_stages=1)
        del primals_16
        buf52 = extern_kernels.convolution(buf51, buf3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf52, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf53 = buf43
        del buf43
        buf54 = buf42
        del buf42
        buf55 = buf41
        del buf41
        triton_per_fused_native_layer_norm_17[grid(4096)](buf52, buf53,
            buf54, buf55, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf56 = buf46
        del buf46
        buf57 = buf45
        del buf45
        buf58 = buf44
        del buf44
        triton_per_fused_native_layer_norm_18[grid(64)](buf53, buf54, buf55,
            buf56, buf57, buf58, 64, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf53
        del buf54
        del buf55
        buf59 = reinterpret_tensor(buf48, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf48
        buf60 = buf47
        del buf47
        buf62 = reinterpret_tensor(buf60, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf60
        triton_per_fused_native_layer_norm_22[grid(4)](buf62, buf56, buf57,
            buf58, buf59, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf56
        del buf57
        del buf58
        buf63 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_23[grid(512, 1024)](buf52,
            buf59, buf62, primals_18, primals_19, buf63, 512, 1024, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_19
        buf64 = extern_kernels.convolution(buf63, buf4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf64, (4, 256, 32, 32), (262144, 1, 8192, 256))
        buf65 = buf40
        del buf40
        triton_poi_fused_add_convolution_div_24[grid(262144)](buf65,
            primals_14, buf64, primals_21, 262144, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf64
        del primals_14
        del primals_21
        # rb3: same pattern at 16x16 / 256 channels.
        buf66 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
            torch.float32)
        triton_poi_fused_add_div_25[grid(65536)](buf65, buf66, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf67 = extern_kernels.convolution(buf66, primals_22, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf67, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf68 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048,
            2048, 1, 8), torch.float32)
        buf69 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048,
            2048, 1, 8), torch.float32)
        buf70 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048,
            2048, 1, 8), torch.float32)
        triton_per_fused_native_layer_norm_26[grid(2048)](buf65, buf68,
            buf69, buf70, 2048, 128, XBLOCK=32, num_warps=8, num_stages=1)
        buf71 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1),
            torch.float32)
        buf72 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1),
            torch.float32)
        buf73 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_27[grid(32)](buf68, buf69, buf70,
            buf71, buf72, buf73, 32, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf74 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf75 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf123 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        triton_per_fused_native_layer_norm_native_layer_norm_backward_28[grid
            (4)](buf71, buf72, buf73, buf74, buf75, buf123, 4, 8, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf77 = buf65
        del buf65
        triton_poi_fused_native_layer_norm_29[grid(262144)](buf77, buf74,
            buf75, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        buf78 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_30[grid(1024, 256)](buf77,
            primals_24, primals_25, buf78, 1024, 256, XBLOCK=32, YBLOCK=32,
            num_warps=4, num_stages=1)
        del primals_25
        buf79 = extern_kernels.convolution(buf78, buf5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf79, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf80 = buf70
        del buf70
        buf81 = buf69
        del buf69
        buf82 = buf68
        del buf68
        triton_per_fused_native_layer_norm_26[grid(2048)](buf79, buf80,
            buf81, buf82, 2048, 128, XBLOCK=32, num_warps=8, num_stages=1)
        buf83 = buf73
        del buf73
        buf84 = buf72
        del buf72
        buf85 = buf71
        del buf71
        triton_per_fused_native_layer_norm_27[grid(32)](buf80, buf81, buf82,
            buf83, buf84, buf85, 32, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf80
        del buf81
        del buf82
        buf86 = reinterpret_tensor(buf75, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf75
        buf87 = buf74
        del buf74
        buf89 = reinterpret_tensor(buf87, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf87
        triton_per_fused_native_layer_norm_31[grid(4)](buf89, buf83, buf84,
            buf85, buf86, 4, 8, XBLOCK=1, num_warps=2, num_stages=1)
        del buf83
        del buf84
        del buf85
        buf90 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_32[grid(1024, 256)](buf79,
            buf86, buf89, primals_27, primals_28, buf90, 1024, 256, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_28
        buf91 = extern_kernels.convolution(buf90, buf6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf91, (4, 512, 16, 16), (131072, 1, 8192, 512))
        buf92 = buf67
        del buf67
        triton_poi_fused_add_convolution_div_33[grid(131072)](buf92,
            primals_23, buf91, primals_30, 131072, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf91
        del primals_23
        del primals_30
        # rb4: same pattern at 8x8 / 512 channels.
        buf93 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.float32)
        triton_poi_fused_add_div_34[grid(32768)](buf92, buf93, 32768,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf94 = extern_kernels.convolution(buf93, primals_31, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf94, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf95 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024,
            1024, 64, 1), torch.float32)
        buf96 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024,
            1024, 64, 1), torch.float32)
        buf97 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024,
            1024, 64, 1), torch.float32)
        triton_per_fused_native_layer_norm_35[grid(1024)](buf92, buf95,
            buf96, buf97, 1024, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf98 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1),
            torch.float32)
        buf99 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1),
            torch.float32)
        buf100 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_36[grid(16)](buf95, buf96, buf97,
            buf98, buf99, buf100, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
        buf101 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf102 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf122 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        triton_per_fused_native_layer_norm_native_layer_norm_backward_37[grid
            (4)](buf98, buf99, buf100, buf101, buf102, buf122, 4, 4, XBLOCK
            =1, num_warps=2, num_stages=1)
        buf104 = buf92
        del buf92
        triton_poi_fused_native_layer_norm_38[grid(131072)](buf104, buf101,
            buf102, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf105 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_39[grid(2048, 64)](buf104,
            primals_33, primals_34, buf105, 2048, 64, XBLOCK=32, YBLOCK=32,
            num_warps=4, num_stages=1)
        del primals_34
        buf106 = extern_kernels.convolution(buf105, buf7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf106, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf107 = buf97
        del buf97
        buf108 = buf96
        del buf96
        buf109 = buf95
        del buf95
        triton_per_fused_native_layer_norm_35[grid(1024)](buf106, buf107,
            buf108, buf109, 1024, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf110 = buf99
        del buf99
        buf111 = buf98
        del buf98
        buf112 = buf100
        del buf100
        triton_per_fused_native_layer_norm_36[grid(16)](buf107, buf108,
            buf109, buf110, buf111, buf112, 16, 64, XBLOCK=8, num_warps=4,
            num_stages=1)
        del buf107
        del buf108
        del buf109
        buf113 = reinterpret_tensor(buf102, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf102
        buf114 = buf101
        del buf101
        buf116 = reinterpret_tensor(buf114, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf114
        triton_per_fused_native_layer_norm_40[grid(4)](buf116, buf110,
            buf111, buf112, buf113, 4, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf110
        del buf111
        del buf112
        buf117 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_41[grid(2048, 64)](buf106,
            buf113, buf116, primals_36, primals_37, buf117, 2048, 64,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_37
        buf118 = extern_kernels.convolution(buf117, buf8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf118, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf119 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch
            .float32)
        triton_poi_fused_add_convolution_div_42[grid(64, 512)](buf94,
            primals_32, buf118, primals_39, buf119, 64, 512, XBLOCK=4,
            YBLOCK=64, num_warps=4, num_stages=1)
        del buf118
        del buf94
        del primals_32
        del primals_39
        # Head: flatten to (4, 8192), linear to z_dim=128, fused bias + tanh.
        buf120 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf119, (4, 8192), (8192, 1),
            0), reinterpret_tensor(primals_40, (8192, 128), (1, 8192), 0),
            out=buf120)
        buf121 = buf120
        del buf120
        triton_poi_fused_tanh_43[grid(512)](buf121, primals_41, 512, XBLOCK
            =128, num_warps=4, num_stages=1)
        del primals_41
    # First element is the embedding; the rest are saved for backward.
    return (buf121, buf0, primals_4, primals_6, buf1, primals_9, buf2,
        primals_13, primals_15, buf3, primals_18, buf4, primals_22,
        primals_24, buf5, primals_27, buf6, primals_31, primals_33, buf7,
        primals_36, buf8, buf9, buf11, buf12, buf20, buf23, buf24, buf25,
        buf32, buf35, buf36, buf39, buf50, buf51, buf52, buf59, buf62,
        buf63, buf66, buf77, buf78, buf79, buf86, buf89, buf90, buf93,
        buf104, buf105, buf106, buf113, buf116, buf117, reinterpret_tensor(
        buf119, (4, 8192), (8192, 1), 0), buf121, primals_40, buf122,
        buf123, buf124)
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class ConvMeanPool(nn.Module):
    """Convolution followed by 2x2 mean pooling (downsamples by 2)."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
        super(ConvMeanPool, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size,
                             he_init=self.he_init)

    def forward(self, input):
        """Convolve, then average the four 2x2-subsampled phases."""
        y = self.conv(input)
        pooled = (y[:, :, ::2, ::2] + y[:, :, 1::2, ::2]
                  + y[:, :, ::2, 1::2] + y[:, :, 1::2, 1::2]) / 4
        return pooled
class MeanPoolConv(nn.Module):
    """2x2 mean pooling followed by a convolution (downsamples by 2)."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
        super(MeanPoolConv, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size,
                             he_init=self.he_init)

    def forward(self, input):
        """Average the four 2x2-subsampled phases, then convolve."""
        pooled = (input[:, :, ::2, ::2] + input[:, :, 1::2, ::2]
                  + input[:, :, ::2, 1::2] + input[:, :, 1::2, 1::2]) / 4
        return self.conv(pooled)
class DepthToSpace(nn.Module):
    """Rearrange channel groups into spatial blocks (depth-to-space).

    Maps (N, C, H, W) to (N, C / b^2, H * b, W * b) for block size b.
    The channel-to-pixel ordering is the split/stack layout of the
    original implementation (it is NOT the same as ``F.pixel_shuffle``).
    """

    def __init__(self, block_size):
        super(DepthToSpace, self).__init__()
        self.block_size = block_size
        self.block_size_sq = block_size * block_size

    def forward(self, input):
        # Work in NHWC so the channel blocks land next to the spatial dims.
        x = input.permute(0, 2, 3, 1)
        n, in_h, in_w, in_c = x.size()
        out_c = int(in_c / self.block_size_sq)
        out_w = int(in_w * self.block_size)
        out_h = int(in_h * self.block_size)
        # Expose the b^2 sub-pixel axis, split it into b groups of b, and
        # fold each group into the width dimension.
        expanded = x.reshape(n, in_h, in_w, self.block_size_sq, out_c)
        parts = expanded.split(self.block_size, 3)
        rows = [p.reshape(n, in_h, out_w, out_c) for p in parts]
        # Interleave the b row-groups into the height dimension.
        merged = torch.stack(rows, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
            ).reshape(n, out_h, out_w, out_c)
        # Back to NCHW.
        return merged.permute(0, 3, 1, 2)
class UpSampleConv(nn.Module):
    """2x nearest-style upsampling (channel replication + depth-to-space)
    followed by a convolution."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
                 bias=True):
        super(UpSampleConv, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size,
                             he_init=self.he_init, bias=bias)
        self.depth_to_space = DepthToSpace(2)

    def forward(self, input):
        """Replicate channels 4x, scatter them into a 2x grid, convolve."""
        quadrupled = torch.cat((input, input, input, input), 1)
        upsampled = self.depth_to_space(quadrupled)
        return self.conv(upsampled)
class ResidualBlock(nn.Module):
    """Pre-activation residual block: norm -> ReLU -> conv, twice, plus a
    (possibly resampling) shortcut.

    ``resample`` selects the variant:
      * ``'down'`` — LayerNorm norms, mean-pool downsampling convs.
      * ``'up'``   — BatchNorm norms, depth-to-space upsampling convs.
      * ``None``   — no resampling; identity shortcut when the channel
        counts match, 1x1 conv shortcut otherwise.
    Any other value raises.
    ``hw`` is the input spatial size, needed for LayerNorm shapes.
    """

    def __init__(self, input_dim, output_dim, kernel_size, resample=None, hw=64
        ):
        super(ResidualBlock, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.kernel_size = kernel_size
        self.resample = resample
        self.bn1 = None
        self.bn2 = None
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        # Each branch registers norms first, then convs, preserving the
        # original module-registration (and parameter-init) order.
        if resample == 'down':
            self.bn1 = nn.LayerNorm([input_dim, hw, hw])
            self.bn2 = nn.LayerNorm([input_dim, hw, hw])
            self.conv_shortcut = MeanPoolConv(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = ConvMeanPool(input_dim, output_dim, kernel_size=
                kernel_size)
        elif resample == 'up':
            self.bn1 = nn.BatchNorm2d(input_dim)
            self.bn2 = nn.BatchNorm2d(output_dim)
            self.conv_shortcut = UpSampleConv(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = UpSampleConv(input_dim, output_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = IWConv2d(output_dim, output_dim, kernel_size=
                kernel_size)
        elif resample is None:
            self.bn1 = nn.BatchNorm2d(output_dim)
            self.bn2 = nn.LayerNorm([input_dim, hw, hw])
            self.conv_shortcut = IWConv2d(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = IWConv2d(input_dim, output_dim, kernel_size=
                kernel_size)
        else:
            raise Exception('invalid resample value')

    def forward(self, input):
        # Identity shortcut only when shapes are guaranteed to match.
        if self.input_dim == self.output_dim and self.resample is None:
            shortcut = input
        else:
            shortcut = self.conv_shortcut(input)
        out = self.bn1(input)
        out = self.relu1(out)
        out = self.conv_1(out)
        out = self.bn2(out)
        out = self.relu2(out)
        out = self.conv_2(out)
        return shortcut + out
class IWEncoderNew(nn.Module):
    """Encoder whose forward pass runs the Inductor-generated `call` graph.

    The module tree mirrors the original IWEncoder; forward only gathers
    every parameter tensor in the positional order the generated kernels
    expect and dispatches to `call`.
    """

    def __init__(self, input_size=64, z_dim=128, n_image_channels=3):
        super(IWEncoderNew, self).__init__()
        self.size = input_size
        self.n_image_channels = n_image_channels
        self.ssize = self.size // 16
        self.conv1 = IWConv2d(n_image_channels, self.size, 3, he_init=False)
        self.rb1 = ResidualBlock(self.size, 2 * self.size, 3,
            resample='down', hw=self.size)
        self.rb2 = ResidualBlock(2 * self.size, 4 * self.size, 3,
            resample='down', hw=int(self.size / 2))
        self.rb3 = ResidualBlock(4 * self.size, 8 * self.size, 3,
            resample='down', hw=int(self.size / 4))
        self.rb4 = ResidualBlock(8 * self.size, 8 * self.size, 3,
            resample='down', hw=int(self.size / 8))
        self.ln1 = nn.Linear(self.ssize * self.ssize * 8 * self.size, z_dim)

    def forward(self, input_0):
        # primals_1..3: input tensor, stem conv weight and bias.
        args = [input_0, self.conv1.conv.weight, self.conv1.conv.bias]
        # primals_4..39: nine tensors per residual block, in exactly the
        # order the generated `call` expects.
        for rb in (self.rb1, self.rb2, self.rb3, self.rb4):
            args += [
                rb.conv_shortcut.conv.conv.weight,
                rb.conv_shortcut.conv.conv.bias,
                rb.bn1.weight,
                rb.bn1.bias,
                rb.conv_1.conv.weight,
                rb.bn2.weight,
                rb.bn2.bias,
                rb.conv_2.conv.conv.weight,
                rb.conv_2.conv.conv.bias,
            ]
        # primals_40..41: final linear projection.
        args += [self.ln1.weight, self.ln1.bias]
        return call(args)[0]
|
MIC-DKFZ/mood
|
IWEncoder
| false
| 8,601
|
[
"Apache-2.0"
] | 42
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
https://github.com/MIC-DKFZ/mood/tree/a01303adb4256653b133e2f7cd4741d366b681f7
|
ReconstructionLoss
|
import torch
from functools import reduce
import torch.nn as nn
class BaseModule(nn.Module):
    """
    Implements the basic module.
    All other modules inherit from this one
    """

    def load_w(self, checkpoint_path):
        """
        Loads a checkpoint into the state_dict.
        :param checkpoint_path: the checkpoint file to be loaded.
        """
        # NOTE(review): CUDA device index 1 is hard-coded; confirm this
        # matches the deployment machine before relying on it.
        device = torch.device('cuda:' + '1')
        self.load_state_dict(torch.load(checkpoint_path, map_location=device))

    def __repr__(self):
        """
        String representation
        """
        good_old = super(BaseModule, self).__repr__()
        addition = 'Total number of parameters: {:,}'.format(self.n_parameters)
        return good_old + '\n' + addition

    def __call__(self, *args, **kwargs):
        return super(BaseModule, self).__call__(*args, **kwargs)

    @property
    def n_parameters(self):
        """
        Number of parameters of the model.

        Pruned parameters (those carrying a `mask` attribute) contribute
        only their unmasked entries.
        """
        n_parameters = 0
        for p in self.parameters():
            if hasattr(p, 'mask'):
                n_parameters += torch.sum(p.mask).item()
            else:
                # Fix: the original used reduce(mul, p.shape) but never
                # imported operator.mul, raising NameError at runtime.
                # Tensor.numel() is the same product of dimensions.
                n_parameters += p.numel()
        return int(n_parameters)
class ReconstructionLoss(BaseModule):
    """
    Implements the reconstruction loss.
    """

    def __init__(self):
        """
        Class constructor.
        """
        super(ReconstructionLoss, self).__init__()

    def forward(self, x, x_r):
        """
        Forward propagation.
        :param x: the batch of input samples.
        :param x_r: the batch of reconstructions.
        :return: the mean reconstruction loss (averaged along the batch axis).
        """
        # Squared error per element, summed one trailing axis at a time
        # until only the batch axis remains, then averaged over the batch.
        err = (x - x_r) ** 2
        while err.dim() > 1:
            err = err.sum(dim=-1)
        return err.mean()
def get_inputs():
    # Two identically-shaped random batches: inputs and reconstructions.
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]


def get_init_inputs():
    # ReconstructionLoss takes no constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from functools import reduce
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused (a - b)**2 summed over the last two axes of the (4, 4, 4, 4)
    # inputs: each of the 16 outputs reduces one contiguous 16-element
    # slab, fully unrolled by the Inductor code generator.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Load all 16 elements of both slabs for output x0.
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp5 = tl.load(in_ptr1 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp9 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp10 = tl.load(in_ptr1 + (2 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp20 = tl.load(in_ptr1 + (4 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp24 = tl.load(in_ptr1 + (5 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp28 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp29 = tl.load(in_ptr1 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp33 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp34 = tl.load(in_ptr1 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp39 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp40 = tl.load(in_ptr1 + (8 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp43 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp44 = tl.load(in_ptr1 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp48 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp49 = tl.load(in_ptr1 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp53 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp54 = tl.load(in_ptr1 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp59 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp60 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp63 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp64 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp68 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp69 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp73 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp74 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Squared differences accumulated in a balanced tree of partial sums
    # (groups of 4, then pairs of groups).
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp21 = tmp19 - tmp20
    tmp22 = tmp21 * tmp21
    tmp25 = tmp23 - tmp24
    tmp26 = tmp25 * tmp25
    tmp27 = tmp22 + tmp26
    tmp30 = tmp28 - tmp29
    tmp31 = tmp30 * tmp30
    tmp32 = tmp27 + tmp31
    tmp35 = tmp33 - tmp34
    tmp36 = tmp35 * tmp35
    tmp37 = tmp32 + tmp36
    tmp38 = tmp18 + tmp37
    tmp41 = tmp39 - tmp40
    tmp42 = tmp41 * tmp41
    tmp45 = tmp43 - tmp44
    tmp46 = tmp45 * tmp45
    tmp47 = tmp42 + tmp46
    tmp50 = tmp48 - tmp49
    tmp51 = tmp50 * tmp50
    tmp52 = tmp47 + tmp51
    tmp55 = tmp53 - tmp54
    tmp56 = tmp55 * tmp55
    tmp57 = tmp52 + tmp56
    tmp58 = tmp38 + tmp57
    tmp61 = tmp59 - tmp60
    tmp62 = tmp61 * tmp61
    tmp65 = tmp63 - tmp64
    tmp66 = tmp65 * tmp65
    tmp67 = tmp62 + tmp66
    tmp70 = tmp68 - tmp69
    tmp71 = tmp70 * tmp70
    tmp72 = tmp67 + tmp71
    tmp75 = tmp73 - tmp74
    tmp76 = tmp75 * tmp75
    tmp77 = tmp72 + tmp76
    tmp78 = tmp58 + tmp77
    tl.store(out_ptr0 + x0, tmp78, xmask)
@triton.jit
def triton_per_fused_mean_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # Final reduction: sums the 4x4 per-sample partial sums from the
    # previous kernel along the last axis, then averages the 4 batch
    # values into a single scalar written to in_out_ptr0.
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    # Four partials per batch row, summed manually before the tl.sum.
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    # Mean over the batch axis of size 4.
    tmp10 = 4.0
    tmp11 = tmp9 / tmp10
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
    # Orchestrates the two kernels: per-sample row sums of the squared
    # error into buf0 (4x4), then a final sum + batch-mean reduction into
    # a scalar CUDA tensor. `args` is cleared so input refs can be freed.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_sub_sum_0[grid(16)](arg0_1, arg1_1, buf0, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        # Scalar output buffer, written in place by the reduction kernel.
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused_mean_sum_1[grid(1)](buf2, buf0, 1, 4, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf0
    return buf2,
class BaseModule(nn.Module):
    """
    Implements the basic module.
    All other modules inherit from this one
    """

    def load_w(self, checkpoint_path):
        """
        Loads a checkpoint into the state_dict.
        :param checkpoint_path: the checkpoint file to be loaded.
        """
        # NOTE(review): CUDA device index 1 is hard-coded; confirm this
        # matches the deployment machine before relying on it.
        device = torch.device('cuda:' + '1')
        self.load_state_dict(torch.load(checkpoint_path, map_location=device))

    def __repr__(self):
        """
        String representation
        """
        good_old = super(BaseModule, self).__repr__()
        addition = 'Total number of parameters: {:,}'.format(self.n_parameters)
        return good_old + '\n' + addition

    def __call__(self, *args, **kwargs):
        return super(BaseModule, self).__call__(*args, **kwargs)

    @property
    def n_parameters(self):
        """
        Number of parameters of the model.

        Pruned parameters (those carrying a `mask` attribute) contribute
        only their unmasked entries.
        """
        n_parameters = 0
        for p in self.parameters():
            if hasattr(p, 'mask'):
                n_parameters += torch.sum(p.mask).item()
            else:
                # Fix: the original used reduce(mul, p.shape) but never
                # imported operator.mul, raising NameError at runtime.
                # Tensor.numel() is the same product of dimensions.
                n_parameters += p.numel()
        return int(n_parameters)
class ReconstructionLossNew(BaseModule):
    """
    Implements the reconstruction loss.
    """

    def __init__(self):
        """
        Class constructor.
        """
        super(ReconstructionLossNew, self).__init__()

    def forward(self, input_0, input_1):
        # Run the fused Triton kernels and unpack the scalar loss.
        return call([input_0, input_1])[0]
|
NjuHaoZhang/AutoregressModel-AE_VAD_CVPR2019
|
ReconstructionLoss
| false
| 8,607
|
[
"MIT"
] | 12
|
b9843f34ecb59f908d78ddf977ee4670e0ed6cb4
|
https://github.com/NjuHaoZhang/AutoregressModel-AE_VAD_CVPR2019/tree/b9843f34ecb59f908d78ddf977ee4670e0ed6cb4
|
Mish
|
from torch.nn import Module
import torch
from torch import Tensor
import torch.optim
class Mish(Module):
    """
    Mish Activation Layer

    Applies a Mish activation function to the input

    Inherits from:
        Module (nn.module.Module)
    """

    def __init__(self) ->None:
        super().__init__()

    def forward(self, x: 'Tensor') ->Tensor:
        """
        Args:
            x (Tensor): (batch_size, num_features)

        Returns:
            Tensor: (batch_size, num_features)
        """
        # mish(x) = x * tanh(softplus(x)) = x * tanh(log(1 + e^x))
        softplus = x.exp().add(1).log()
        return x * softplus.tanh()
def get_inputs():
    # One random 4D activation batch.
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    # Mish takes no constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_exp_log_mul_tanh_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise Mish: out = x * tanh(log(1 + exp(x))) over all 256
    # elements of the flattened (4, 4, 4, 4) input.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tl_math.log(tmp3)
    tmp5 = libdevice.tanh(tmp4)
    tmp6 = tmp0 * tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
    # Allocates the output buffer and launches the fused Mish kernel on
    # the flattened 256-element input; returns a 1-tuple with the result.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_exp_log_mul_tanh_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class MishNew(Module):
    """
    Mish Activation Layer

    Applies a Mish activation function to the input

    Inherits from:
        Module (nn.module.Module)
    """

    def __init__(self) ->None:
        super().__init__()

    def forward(self, input_0):
        # Delegate to the fused Triton elementwise kernel.
        return call([input_0])[0]
|
PABannier/nanograd
|
Mish
| false
| 8,609
|
[
"MIT"
] | 18
|
5acd355c638885cbfc0fd0f1c4903964e7fb7de9
|
https://github.com/PABannier/nanograd/tree/5acd355c638885cbfc0fd0f1c4903964e7fb7de9
|
EdgeLoss
|
import torch
import torch.nn as nn
class EdgeLoss(nn.Module):

    def __init__(self):
        """
        Return Binary Entropy Loss with mean of all losses in each mini-batch
        """
        super(EdgeLoss, self).__init__()
        self.cross_entropy = nn.BCELoss(reduction='mean')

    def forward(self, y, y_pred):
        # NOTE(review): `y` is forwarded as the BCELoss *input* and
        # `y_pred` as the *target*; the parameter names suggest the
        # opposite order -- confirm against the callers before swapping.
        return self.cross_entropy(y, y_pred)
def get_inputs():
    # Two random batches: predictions and targets for the BCE loss.
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]


def get_init_inputs():
    # EdgeLoss takes no constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    # Single-block BCE reduction over all 256 elements:
    # loss = mean((t - 1) * clamp(log(1 - p), -100) - t * clamp(log(p), -100))
    # where t comes from in_ptr0 and p from in_ptr1; the -100 clamp matches
    # PyTorch's BCELoss log-saturation behavior.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = -tmp3
    tmp5 = libdevice.log1p(tmp4)
    tmp6 = -100.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp2 * tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = triton_helpers.maximum(tmp9, tmp6)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp8 - tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    # Mean reduction over the 256 elements.
    tmp16 = 256.0
    tmp17 = tmp15 / tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
    # Allocates the scalar loss buffer and runs the single fused BCE
    # reduction kernel over both (4, 4, 4, 4) inputs.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class EdgeLossNew(nn.Module):

    def __init__(self):
        """
        Return Binary Entropy Loss with mean of all losses in each mini-batch
        """
        super(EdgeLossNew, self).__init__()
        self.cross_entropy = nn.BCELoss(reduction='mean')

    def forward(self, input_0, input_1):
        # Dispatch straight to the fused Triton BCE reduction.
        return call([input_0, input_1])[0]
|
Nikronic/EdgeNet
|
EdgeLoss
| false
| 8,610
|
[
"MIT"
] | 12
|
ec649af303bd7d5397fd3d4cbf8736bd83756abb
|
https://github.com/Nikronic/EdgeNet/tree/ec649af303bd7d5397fd3d4cbf8736bd83756abb
|
CNNEncoder
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class CNNEncoder(nn.Module):

    def __init__(self, out_channels: 'int', kernel_size: 'tuple'):
        super(CNNEncoder, self).__init__()
        self.cnn_encoder = nn.Conv2d(in_channels=1, out_channels=out_channels,
            kernel_size=kernel_size)

    def forward(self, x: 'torch.Tensor'):
        # Treat the 3D input as a stack of single-channel images, convolve,
        # rectify, then average over the height axis (dim 2).
        feats = F.relu(self.cnn_encoder(x.unsqueeze(dim=1)))
        return feats.mean(dim=2)
def get_inputs():
    # One batch of 4 unbatched single-channel 4x4 maps.
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    # Constructor kwargs for CNNEncoder.
    return [[], dict(out_channels=4, kernel_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_mean_relu_threshold_backward_0(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Epilogue of the conv: adds the bias, applies ReLU, divides by 1.0
    # (the mean over a singleton height axis), and also emits the ReLU
    # zero-mask used by autograd's threshold_backward.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    # Mean over a size-1 axis degenerates to division by 1.0.
    tmp5 = 1.0
    tmp6 = tmp4 / tmp5
    tmp7 = 0.0
    tmp8 = tmp4 <= tmp7
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
    # Runs the conv via the extern kernel (input viewed as N=4, C=1, 4x4),
    # then the fused bias+ReLU+mean epilogue; returns the pooled features
    # plus tensors saved for the backward pass.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            1, 4, 4), (16, 16, 4, 1), 0), primals_2, stride=(1, 1), padding
            =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
            0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_convolution_mean_relu_threshold_backward_0[grid(16)](
            buf0, primals_3, buf1, buf2, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf0
        del primals_3
    return buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4, 4), (16,
        16, 4, 1), 0), buf2
class CNNEncoderNew(nn.Module):

    def __init__(self, out_channels: 'int', kernel_size: 'tuple'):
        super(CNNEncoderNew, self).__init__()
        self.cnn_encoder = nn.Conv2d(in_channels=1, out_channels=out_channels,
            kernel_size=kernel_size)

    def forward(self, input_0):
        # Feed the input plus conv parameters into the generated graph.
        weight = self.cnn_encoder.weight
        bias = self.cnn_encoder.bias
        return call([input_0, weight, bias])[0]
|
OwenLeng/Early-Detection-of-Fake-News-on-Social-Media-Through-Propagation-Path-Classification-with-pytorch-
|
CNNEncoder
| false
| 8,612
|
[
"MIT"
] | 38
|
39f8b7508240ebf58a3cdcf69fbb838a4239e0e5
|
https://github.com/OwenLeng/Early-Detection-of-Fake-News-on-Social-Media-Through-Propagation-Path-Classification-with-pytorch-/tree/39f8b7508240ebf58a3cdcf69fbb838a4239e0e5
|
_Mean
|
import torch
import torch.nn as nn
import torch.jit
class _Mean(nn.Module):
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return input.mean()
def get_inputs():
    # One random 4D tensor to be reduced to its scalar mean.
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    # _Mean takes no constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
    # Single-block mean over all 256 elements of the flattened input;
    # the scalar result is written in place to in_out_ptr0.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
    tmp4 = 256.0
    tmp5 = tmp3 / tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
def call(args):
    # Allocates a scalar CUDA buffer and launches the single-block mean
    # reduction over the (4, 4, 4, 4) input.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2,
            num_stages=1)
        del arg0_1
    return buf1,
class _MeanNew(nn.Module):

    def forward(self, input_0):
        # Scalar mean via the fused Triton reduction.
        return call([input_0])[0]
|
One-sixth/ms_ssim_pytorch
|
_Mean
| false
| 8,615
|
[
"MIT"
] | 42
|
6269c62e0dd29c91fa38e4ba73d906d0c84ca966
|
https://github.com/One-sixth/ms_ssim_pytorch/tree/6269c62e0dd29c91fa38e4ba73d906d0c84ca966
|
NetTan2018
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NetTan2018(nn.Module):
    """CNN classifier: seven 3x3 valid convolutions with interleaved 2x2
    max-pooling, followed by a three-layer fully-connected head."""

    def __init__(self, in_channels=3, out_classes=2):
        super(NetTan2018, self).__init__()
        # Base channel width; stages use oc, 2*oc, 4*oc, 8*oc.
        oc = 16
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=oc,
            kernel_size=(3, 3), padding=0)
        self.max1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=oc, out_channels=oc * 2,
            kernel_size=(3, 3), padding=0)
        self.max2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(in_channels=oc * 2, out_channels=oc * 2,
            kernel_size=(3, 3), padding=0)
        self.max3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(in_channels=oc * 2, out_channels=oc * 4,
            kernel_size=(3, 3), padding=0)
        self.conv5 = nn.Conv2d(in_channels=oc * 4, out_channels=oc * 4,
            kernel_size=(3, 3), padding=0)
        self.max5 = nn.MaxPool2d(2, 2)
        self.conv6 = nn.Conv2d(in_channels=oc * 4, out_channels=oc * 8,
            kernel_size=(3, 3), padding=0)
        self.conv7 = nn.Conv2d(in_channels=oc * 8, out_channels=oc * 8,
            kernel_size=(3, 3), padding=0)
        # Head assumes a 4x4x128 feature map at flatten time.
        self.hidden1 = nn.Linear(in_features=4 * 4 * 128, out_features=128)
        self.hidden2 = nn.Linear(in_features=128, out_features=64)
        self.final = nn.Linear(in_features=64, out_features=out_classes)

    def forward(self, x):
        # With 144x144 input the spatial trace is:
        # 144 ->142 ->71 ->69 ->34 ->32 ->16 ->14 ->12 ->6 ->4 ->2.
        x = self.max1(F.relu(self.conv1(x)))
        x = self.max2(F.relu(self.conv2(x)))
        x = self.max3(F.relu(self.conv3(x)))
        x = self.max5(F.relu(self.conv5(F.relu(self.conv4(x)))))
        x = F.relu(self.conv7(F.relu(self.conv6(x))))
        # NOTE(review): for the 144x144 inputs from get_inputs() the map
        # here is (N, 128, 2, 2) = 512 values per sample, so this view
        # folds 4 samples into one 2048-wide row (mixing the batch); it
        # only runs when N is a multiple of 4. A 176x176 input would give
        # the expected 4x4 map -- confirm the intended input resolution.
        x = x.view(-1, 4 * 4 * 128)
        x = F.relu(self.hidden1(x))
        x = F.relu(self.hidden2(x))
        x = self.final(x)
        return x
def get_inputs():
    # One batch of four 3-channel 144x144 images.
    return [torch.rand([4, 3, 144, 144])]


def get_init_inputs():
    # NetTan2018 takes no constructor arguments.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repacks the conv1 weight (48 rows = 16 out x 3 in channels, 9 taps
    # each) from contiguous (row, tap) order into the interleaved layout
    # y0 + 3*x2 + 27*y1 expected by the fused convolution kernels.
    ynumel = 48
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repacks the network input (12 planes = 4 images x 3 channels,
    # 20736 = 144*144 pixels each) into a channels-interleaved layout
    # (y0 + 3*x2 + 62208*y1) for the fused convolutions.
    ynumel = 12
    xnumel = 20736
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 20736 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 62208 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repacks a 3x3 conv weight with 512 rows (32 out x 16 in channels)
    # into the interleaved layout y0 + 16*x2 + 144*y1.
    ynumel = 512
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 16
    y1 = yindex // 16
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repacks a 3x3 conv weight with 32 input channels into the
    # interleaved layout y0 + 32*x2 + 288*y1. The y axis needs no mask
    # (all-true): the launch grid covers its exact element count.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same repack as triton_poi_fused_3 (32-input-channel 3x3 weights,
    # interleaved y0 + 32*x2 + 288*y1); emitted separately per conv layer.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repacks a 3x3 conv weight with 64 input channels into the
    # interleaved layout y0 + 64*x2 + 576*y1 (y axis unmasked).
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same repack as triton_poi_fused_5 (64-input-channel 3x3 weights,
    # interleaved y0 + 64*x2 + 576*y1); emitted separately per conv layer.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repacks a 3x3 conv weight with 128 input channels into the
    # interleaved layout y0 + 128*x2 + 1152*y1 (y axis unmasked).
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU over 1290496 = 4*16*142*142 elements,
    # matching the conv1 output on the 144x144 input; bias is indexed by
    # the 16-channel remainder.
    xnumel = 1290496
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2/stride-2 max pooling with argmax indices over a channels-last
    # 16-channel map; 322624 = 4*16*71*71 outputs (the max1 stage).
    xnumel = 322624
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16 % 71
    x2 = xindex // 1136
    x3 = xindex
    # The four elements of one 2x2 pooling window.
    tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1 + 4544 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1 + 4544 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (2272 + x0 + 32 * x1 + 4544 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (2288 + x0 + 32 * x1 + 4544 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # int8 argmax (0..3) saved for max_pool2d's backward pass.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU over 609408 = 4*32*69*69 elements,
    # matching the conv2 output; bias indexed by the 32-channel remainder.
    xnumel = 609408
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2/stride-2 max pooling with argmax indices over a channels-last
    # 32-channel map; 147968 = 4*32*34*34 outputs (the max2 stage).
    xnumel = 147968
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 32
    x1 = xindex // 32 % 34
    x2 = xindex // 1088 % 34
    x3 = xindex // 36992
    x4 = xindex
    # The four elements of one 2x2 pooling window.
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4416 * x2 + 152352 * x3), xmask)
    tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 4416 * x2 + 152352 * x3),
        xmask)
    tmp3 = tl.load(in_ptr0 + (2208 + x0 + 64 * x1 + 4416 * x2 + 152352 * x3
        ), xmask)
    tmp5 = tl.load(in_ptr0 + (2240 + x0 + 64 * x1 + 4416 * x2 + 152352 * x3
        ), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # int8 argmax (0..3) saved for max_pool2d's backward pass.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x4, tmp6, xmask)
    tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + ReLU (32 channels); the launch grid covers the
    tensor exactly, so no bounds mask is applied."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 32  # channel index for the per-channel bias
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """2x2 / stride-2 max pool (channels-last, 32 channels, 16x16 output);
    stores pooled values plus the int8 in-window argmax. The grid covers
    the tensor exactly, so no bounds mask is used."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32 % 16
    x2 = xindex // 512
    x3 = xindex
    # The four corners of the 2x2 input window.
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2048 * x2), None)
    tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 2048 * x2), None)
    tmp3 = tl.load(in_ptr0 + (1024 + x0 + 64 * x1 + 2048 * x2), None)
    tmp5 = tl.load(in_ptr0 + (1056 + x0 + 64 * x1 + 2048 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Reconstruct which of the four inputs produced the max.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + ReLU over a channels-last conv output
    (64 channels)."""
    xnumel = 50176
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64  # channel index -> per-channel bias
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + ReLU (64 channels); grid covers the tensor
    exactly, so no bounds mask is applied."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64  # channel index for the per-channel bias
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """2x2 / stride-2 max pool (channels-last, 64 channels, 6x6 output);
    stores pooled values and the int8 in-window argmax for backward."""
    xnumel = 9216
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x1 = xindex // 64 % 6
    x2 = xindex // 384
    x3 = xindex
    # The four corners of the 2x2 input window.
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 1536 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 1536 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (768 + x0 + 128 * x1 + 1536 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (832 + x0 + 128 * x1 + 1536 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Reconstruct which of the four inputs produced the max.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + ReLU (128 channels); grid covers the tensor
    exactly, so no bounds mask is applied."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128  # channel index for the per-channel bias
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    """Bias-add + ReLU on the last conv output, written twice:
    out_ptr0 gets the activation repacked into a contiguous layout (ready
    to be flattened for the linear head) and out_ptr1 gets the boolean
    `activation <= 0` mask used by ReLU backward."""
    ynumel = 512
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 128  # channel index -> per-channel bias
    y1 = yindex // 128
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 512 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # mask of zeroed activations for backward
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
    tl.store(out_ptr1 + (y0 + 128 * x2 + 512 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU for a 128-wide linear layer output."""
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1  # matmul output + bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU for a 64-wide linear layer output."""
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1  # matmul output + bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
    """Inductor-compiled forward pass for NetTan2018New.

    `args` holds the 21 primals: conv/linear weights and biases plus the
    (4, 3, 144, 144) input image (primals_3) — see the stride asserts.
    Returns the logits (buf34) followed by the intermediate buffers that
    autograd needs for the backward pass. `args` is cleared in place.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21) = args
    args.clear()
    # Shape/stride guards for every parameter and the input.
    assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 3, 144, 144), (62208, 20736, 144, 1))
    assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (32,), (1,))
    assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_9, (64,), (1,))
    assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_11, (64,), (1,))
    assert_size_stride(primals_12, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_13, (128,), (1,))
    assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_15, (128,), (1,))
    assert_size_stride(primals_16, (128, 2048), (2048, 1))
    assert_size_stride(primals_17, (128,), (1,))
    assert_size_stride(primals_18, (64, 128), (128, 1))
    assert_size_stride(primals_19, (64,), (1,))
    assert_size_stride(primals_20, (2, 64), (64, 1))
    assert_size_stride(primals_21, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Repack the conv weights and the input image into channels-last
        # strides (kernels triton_poi_fused_0..7) for the conv kernels.
        buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(48, 9)](primals_1, buf0, 48, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 144, 144), (62208, 1, 432, 3),
            torch.float32)
        triton_poi_fused_1[grid(12, 20736)](primals_3, buf1, 12, 20736,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch.
            float32)
        triton_poi_fused_2[grid(512, 9)](primals_4, buf2, 512, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
            float32)
        triton_poi_fused_3[grid(1024, 9)](primals_6, buf3, 1024, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
            float32)
        triton_poi_fused_4[grid(2048, 9)](primals_8, buf4, 2048, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf5 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
            float32)
        triton_poi_fused_5[grid(4096, 9)](primals_10, buf5, 4096, 9, XBLOCK
            =16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_10
        buf6 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
            .float32)
        triton_poi_fused_6[grid(8192, 9)](primals_12, buf6, 8192, 9, XBLOCK
            =16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_12
        buf7 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_7[grid(16384, 9)](primals_14, buf7, 16384, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_14
        # conv1 -> bias+ReLU -> 2x2 max pool.
        buf8 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 16, 142, 142), (322624, 1, 2272, 16))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_relu_8[grid(1290496)](buf9, primals_2,
            1290496, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf10 = empty_strided_cuda((4, 16, 71, 71), (80656, 1, 1136, 16),
            torch.float32)
        buf11 = empty_strided_cuda((4, 16, 71, 71), (80656, 1, 1136, 16),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_9[grid(322624)](buf9,
            buf10, buf11, 322624, XBLOCK=512, num_warps=8, num_stages=1)
        # conv2 -> bias+ReLU -> pool.
        buf12 = extern_kernels.convolution(buf10, buf2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 32, 69, 69), (152352, 1, 2208, 32))
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_relu_10[grid(609408)](buf13, primals_5,
            609408, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_5
        buf14 = empty_strided_cuda((4, 32, 34, 34), (36992, 1, 1088, 32),
            torch.float32)
        buf15 = empty_strided_cuda((4, 32, 34, 34), (36992, 1, 1088, 32),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_11[grid(147968)](buf13,
            buf14, buf15, 147968, XBLOCK=512, num_warps=8, num_stages=1)
        # conv3 -> bias+ReLU -> pool.
        buf16 = extern_kernels.convolution(buf14, buf3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 32, 32, 32), (32768, 1, 1024, 32))
        buf17 = buf16
        del buf16
        triton_poi_fused_convolution_relu_12[grid(131072)](buf17, primals_7,
            131072, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf18 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
            torch.float32)
        buf19 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_13[grid(32768)](buf17,
            buf18, buf19, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        # conv4 -> ReLU, conv5 -> ReLU -> pool.
        buf20 = extern_kernels.convolution(buf18, buf4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 64, 14, 14), (12544, 1, 896, 64))
        buf21 = buf20
        del buf20
        triton_poi_fused_convolution_relu_14[grid(50176)](buf21, primals_9,
            50176, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        buf22 = extern_kernels.convolution(buf21, buf5, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 64, 12, 12), (9216, 1, 768, 64))
        buf23 = buf22
        del buf22
        triton_poi_fused_convolution_relu_15[grid(36864)](buf23, primals_11,
            36864, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_11
        buf24 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch
            .float32)
        buf25 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch
            .int8)
        triton_poi_fused_max_pool2d_with_indices_16[grid(9216)](buf23,
            buf24, buf25, 9216, XBLOCK=128, num_warps=4, num_stages=1)
        # conv6 -> ReLU, conv7 -> ReLU (with the <=0 mask kept for backward).
        buf26 = extern_kernels.convolution(buf24, buf6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 128, 4, 4), (2048, 1, 512, 128))
        buf27 = buf26
        del buf26
        triton_poi_fused_convolution_relu_17[grid(8192)](buf27, primals_13,
            8192, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_13
        buf28 = extern_kernels.convolution(buf27, buf7, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 128, 2, 2), (512, 1, 256, 128))
        buf29 = empty_strided_cuda((4, 128, 2, 2), (512, 4, 2, 1), torch.
            float32)
        buf35 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_18[grid(512, 4)](
            buf28, primals_15, buf29, buf35, 512, 4, XBLOCK=4, YBLOCK=64,
            num_warps=4, num_stages=1)
        del buf28
        del primals_15
        # Flattened features through the three linear layers (mm + bias/ReLU).
        buf30 = empty_strided_cuda((1, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf29, (1, 2048), (0, 1), 0),
            reinterpret_tensor(primals_16, (2048, 128), (1, 2048), 0), out=
            buf30)
        buf31 = buf30
        del buf30
        triton_poi_fused_relu_19[grid(128)](buf31, primals_17, 128, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_17
        buf32 = empty_strided_cuda((1, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf31, reinterpret_tensor(primals_18, (128, 64),
            (1, 128), 0), out=buf32)
        buf33 = buf32
        del buf32
        triton_poi_fused_relu_20[grid(64)](buf33, primals_19, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_19
        # Final linear layer: logits = bias + hidden2 @ final.weight.T.
        buf34 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_21, buf33, reinterpret_tensor(
            primals_20, (64, 2), (1, 64), 0), alpha=1, beta=1, out=buf34)
        del primals_21
    return (buf34, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf9,
        buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19, buf21,
        buf23, buf24, buf25, buf27, reinterpret_tensor(buf29, (1, 2048), (
        2048, 1), 0), buf31, buf33, primals_20, primals_18, primals_16, buf35)
class NetTan2018New(nn.Module):
    """Tan-2018 CNN classifier wired to the Inductor-compiled `call`.

    Seven 3x3 valid convolutions with interleaved 2x2 max pools feed a
    128 -> 64 -> `out_classes` linear head. `forward` only gathers the
    module parameters in the order `call` expects and returns the logits.
    """

    def __init__(self, in_channels=3, out_classes=2):
        super(NetTan2018New, self).__init__()
        oc = 16
        c1, c2, c4, c8 = oc, oc * 2, oc * 4, oc * 8
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=c1,
            kernel_size=(3, 3), padding=0)
        self.max1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=c1, out_channels=c2,
            kernel_size=(3, 3), padding=0)
        self.max2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(in_channels=c2, out_channels=c2,
            kernel_size=(3, 3), padding=0)
        self.max3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(in_channels=c2, out_channels=c4,
            kernel_size=(3, 3), padding=0)
        self.conv5 = nn.Conv2d(in_channels=c4, out_channels=c4,
            kernel_size=(3, 3), padding=0)
        self.max5 = nn.MaxPool2d(2, 2)
        self.conv6 = nn.Conv2d(in_channels=c4, out_channels=c8,
            kernel_size=(3, 3), padding=0)
        self.conv7 = nn.Conv2d(in_channels=c8, out_channels=c8,
            kernel_size=(3, 3), padding=0)
        self.hidden1 = nn.Linear(in_features=4 * 4 * 128, out_features=128)
        self.hidden2 = nn.Linear(in_features=128, out_features=64)
        self.final = nn.Linear(in_features=64, out_features=out_classes)

    def forward(self, input_0):
        # primals_1..3 are conv1.weight, conv1.bias and the input image;
        # every remaining layer contributes a (weight, bias) pair in order.
        layers = [self.conv2, self.conv3, self.conv4, self.conv5,
            self.conv6, self.conv7, self.hidden1, self.hidden2, self.final]
        primals = [self.conv1.weight, self.conv1.bias, input_0]
        for layer in layers:
            primals.append(layer.weight)
            primals.append(layer.bias)
        return call(primals)[0]
|
Nicolik/SimpleCNNClassifier
|
NetTan2018
| false
| 8,616
|
[
"MIT"
] | 11
|
e5cd37fbde90f4096183658abe3f8836be92a8f2
|
https://github.com/Nicolik/SimpleCNNClassifier/tree/e5cd37fbde90f4096183658abe3f8836be92a8f2
|
CRFRNN
|
import torch
import torch._C
import torch.serialization
from torch import nn
from torch.nn import init
from torch.nn import Parameter
def make_onehot_kernel(kernel_size, index):
    """
    Build a square 2D kernel of shape (1, 1, kernel_size, kernel_size)
    that is all zeros except for a single 1 at flat position `index`.
    """
    flat = torch.zeros(kernel_size * kernel_size)
    flat[index] = 1
    return flat.view(1, 1, kernel_size, kernel_size)
def make_spatial_kernel(kernel_size, bandwidth, isreshape=True):
    """
    Build a square Gaussian smoothness kernel with a zeroed center.

    Entry (i, j) is exp(-(dx**2 + dy**2) / (2 * bandwidth**2)) for the
    offset (dx, dy) of that tap from the kernel center; the center tap
    itself is forced to 0. Returned as (1, 1, k, k) when `isreshape`.
    """
    assert bandwidth > 0, 'bandwidth of kernel must be > 0'
    assert kernel_size % 2 != 0, 'kernel must be odd'
    half = (kernel_size - 1) // 2
    coords = torch.linspace(-half, half, steps=kernel_size)
    xx = coords.expand(kernel_size, kernel_size)
    yy = xx.clone().t()
    kernel = torch.exp(-(xx * xx + yy * yy) / (2 * bandwidth ** 2))
    kernel[half, half] = 0
    if isreshape:
        return kernel.view(1, 1, kernel_size, kernel_size)
    return kernel
class GaussianMask(nn.Module):
    """
    Per-neighbor Gaussian similarity maps via a frozen one-hot convolution.

    A grouped, bias-free conv with fixed one-hot kernels extracts the
    shifted map X(j) for every non-center offset j of the window. When
    `iskernel` is True the forward pass returns
    exp(-(X(j) - X(i))**2 / (2 * bandwidth**2)); otherwise it returns the
    raw shifted maps.
    """

    def __init__(self, in_channels, kernel_size, bandwidth, iskernel=True):
        super(GaussianMask, self).__init__()
        assert bandwidth > 0, 'bandwidth of kernel must be > 0'
        assert kernel_size % 2 != 0, 'kernel must be odd'
        self.bandwidth = bandwidth
        self.iskernel = iskernel
        # One output map per window position except the center.
        self.n_kernels = kernel_size ** 2 - 1
        weight = self._make_kernel_weight(in_channels, kernel_size,
            self.n_kernels)
        self.conv = nn.Conv2d(in_channels, in_channels * self.n_kernels,
            kernel_size, stride=1, padding=kernel_size // 2,
            groups=in_channels, bias=False)
        # Frozen extraction weights — not trainable.
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight.view_as(self.conv.weight))

    def _make_kernel_weight(self, in_channels, kernel_size, n_kernels):
        # Skip the window center: indices >= n_kernels // 2 shift by one.
        weight = torch.zeros(in_channels, n_kernels, kernel_size, kernel_size)
        for k in range(n_kernels):
            onehot_idx = k if k < n_kernels // 2 else k + 1
            weight[:, k, :] = make_onehot_kernel(kernel_size, onehot_idx)
        return weight

    def forward(self, X):
        batch_size, in_channels, H, W = X.shape
        Xj = self.conv(X).view(batch_size, in_channels, self.n_kernels, H, W)
        if not self.iskernel:
            return Xj
        diff_sq = (Xj - X.unsqueeze(dim=2)) ** 2
        return torch.exp(-diff_sq / (2 * self.bandwidth ** 2))
class SpatialFilter(nn.Module):
    """
    Smoothness (spatial) filter of the CRF, expressed as a frozen grouped
    convolution with a Gaussian kernel; the response is normalized by the
    same convolution applied to an all-ones tensor.
    refer: https://arxiv.org/pdf/1210.5644.pdf
    """

    def __init__(self, n_classes, kernel_size, theta_gamma):
        super(SpatialFilter, self).__init__()
        weight = make_spatial_kernel(kernel_size, theta_gamma)
        self.conv = nn.Conv2d(n_classes, n_classes, kernel_size, stride=1,
            padding=kernel_size // 2, groups=n_classes, bias=False)
        # Fixed Gaussian taps — not trainable.
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight)

    def forward(self, Q):
        ones = Q.new_ones(*Q.shape, requires_grad=False)
        # Normalize by the per-pixel sum of in-bounds kernel weights.
        return self.conv(Q) / self.conv(ones)
class BilateralFilter(nn.Module):
    """
    Appearance (bilateral) filter of the CRF as CNN blocks, using
    exp(-a-b) = exp(-a)*exp(-b): the spatial Gaussian taps are a fixed
    buffer and the intensity Gaussian comes from a GaussianMask over I.
    """

    def __init__(self, in_channels, n_classes, kernel_size, theta_alpha,
        theta_beta):
        super(BilateralFilter, self).__init__()
        spatial = make_spatial_kernel(kernel_size, theta_alpha, isreshape=False)
        # Keep only the non-zero (off-center) taps, shaped for broadcasting.
        self.spatial_weight = Parameter(spatial[spatial > 0].view(1, 1, 1,
            -1, 1, 1), requires_grad=False)
        self.gauss_mask_I = GaussianMask(in_channels, kernel_size, theta_beta)
        self.guass_mask_Q = GaussianMask(n_classes, kernel_size, 1,
            iskernel=False)

    def forward(self, Q, I):
        # Appearance similarity of each pixel to its window neighbors.
        feat_k = self.gauss_mask_I(I)
        # Raw shifted label maps (guass_mask_Q is built with iskernel=False).
        label_j = self.guass_mask_Q(Q)
        weighted = feat_k.unsqueeze(dim=2) * label_j.unsqueeze(dim=1)
        weighted = weighted * self.spatial_weight
        filtered = weighted.sum(dim=3)
        # Per-pixel normalization by the total neighbor weight.
        norm = (feat_k * self.spatial_weight.squeeze(dim=2)).sum(dim=2)
        return filtered / norm.unsqueeze(dim=2)
class MessagePassing(nn.Module):
    """
    Message-passing step of the meanfield update: runs every bilateral
    (appearance) filter and every spatial (smoothness) filter on Q and
    concatenates their responses along dim 1.
    """

    def __init__(self, in_channels, n_classes, kernel_size=[3], theta_alpha
        =[2.0], theta_beta=[2.0], theta_gamma=[2.0]):
        super(MessagePassing, self).__init__()
        assert len(theta_alpha) == len(theta_beta
            ), 'theta_alpha and theta_beta have different lengths'
        self.n_bilaterals, self.n_spatials = len(theta_alpha), len(theta_gamma)
        # Submodules are registered by index so forward can look them up.
        for idx in range(self.n_bilaterals):
            self.add_module('bilateral{}'.format(idx), BilateralFilter(
                in_channels, n_classes, kernel_size[idx], theta_alpha[idx],
                theta_beta[idx]))
        for idx in range(self.n_spatials):
            self.add_module('spatial{}'.format(idx), SpatialFilter(
                n_classes, kernel_size[idx], theta_gamma[idx]))

    def _get_child(self, child_name):
        # Lookup for the dynamically registered submodules above.
        return getattr(self, child_name)

    def forward(self, Q, I):
        filtered = [self._get_child('bilateral{}'.format(i))(Q, I)
            for i in range(self.n_bilaterals)]
        filtered += [self._get_child('spatial{}'.format(i))(Q).unsqueeze(dim=1)
            for i in range(self.n_spatials)]
        return torch.cat(filtered, dim=1)
class CRFRNN(nn.Module):
    """Meanfield CRF inference unrolled as a recurrent CNN.

    Each iteration: softmax over classes, message passing, learned
    per-filter weighting, compatibility transform (with its diagonal
    removed so a label does not message itself), then subtraction from
    the unary potentials U.
    """

    def __init__(self, n_iter, in_channels, n_classes, kernel_size=[3, 3],
        theta_alpha=[1.5, 2.5], theta_beta=[1.5, 2.5], theta_gamma=[1.5]):
        super(CRFRNN, self).__init__()
        self.n_iter = n_iter
        self.n_classes = n_classes
        # One response per (bilateral filter x channel) plus the spatials.
        n_filters = in_channels * len(theta_alpha) + len(theta_gamma)
        self.softmax = nn.Softmax2d()
        self.messagepassing = MessagePassing(in_channels, n_classes,
            kernel_size=kernel_size, theta_alpha=theta_alpha,
            theta_beta=theta_beta, theta_gamma=theta_gamma)
        self.weightfiltering = Parameter(torch.rand(1, n_filters, n_classes,
            1, 1))
        self.compatibilitytransf = nn.Conv2d(n_classes, n_classes,
            kernel_size=1, stride=1, padding=0, bias=False)
        self._weight_initial()
        self.train_step = 0

    def _weight_initial(self):
        # Kaiming init for the learnable filtering/compatibility weights.
        for w in (self.weightfiltering, self.compatibilitytransf.weight):
            init.kaiming_normal_(w)

    def forward(self, U, I):
        if self.training:
            # Curriculum: more meanfield iterations as training progresses.
            for limit, iters in ((60000, 1), (70000, 2), (75000, 3)):
                if self.train_step < limit:
                    self.n_iter = iters
                    break
            else:
                self.n_iter = 4
            self.train_step = self.train_step + 1
        else:
            self.n_iter = 8
        Q = U
        for _ in range(self.n_iter):
            Q = self.softmax(Q)
            Q = self.messagepassing(Q, I)
            Q = (Q * self.weightfiltering).sum(dim=1)
            # Compatibility transform minus its diagonal contribution.
            diag = self.compatibilitytransf.weight.squeeze().diag().view(
                1, self.n_classes, 1, 1)
            Q = self.compatibilitytransf(Q) - Q * diag
            Q = U - Q
        return Q
def get_inputs():
    """Random (U, I) pair — unary potentials and image — both (4, 4, 4, 4)."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(2)]
def get_init_inputs():
    """Constructor arguments for CRFRNN: no positionals, three keywords."""
    kwargs = {'n_iter': 4, 'in_channels': 4, 'n_classes': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch._C
import torch.serialization
from torch import nn
from torch.nn import init
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Numerically stable softmax numerator over the 4-channel dim of a
    (4, 4, 4, 4) tensor: subtract the per-pixel channel max, then exp."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16  # spatial position within one channel plane
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # The four channel values at this pixel (channel stride is 16).
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7  # subtract channel max for stability
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Softmax normalization: divide each exp value by the per-pixel sum
    over the 4 channels (channel stride is 16)."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16  # spatial position within one channel plane
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6  # channel-wise sum of exps
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_div_exp_neg_pow_sub_2(in_out_ptr0, in_out_ptr1,
    in_ptr0, xnumel, XBLOCK: tl.constexpr):
    """In-place Gaussian appearance masks for the two bilateral filters:
    exp(-(Xj - Xi)**2 * c). The constants match 1/(2*theta**2):
    0.2222... = 1/(2*1.5**2) and 0.08 = 1/(2*2.5**2), consistent with
    theta_beta = [1.5, 2.5]."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    # Center pixel value Xi, broadcast against its shifted neighbors.
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), None, eviction_policy='evict_last'
        )
    tmp8 = tl.load(in_out_ptr1 + x3, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = 0.2222222222222222
    tmp5 = tmp3 * tmp4
    tmp6 = -tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp8 - tmp1
    tmp10 = tmp9 * tmp9
    tmp11 = 0.08
    tmp12 = tmp10 * tmp11
    tmp13 = -tmp12
    tmp14 = tl_math.exp(tmp13)
    tl.store(in_out_ptr0 + x3, tmp7, None)
    tl.store(in_out_ptr1 + x3, tmp14, None)
@triton.jit
def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    """Per-pixel reduction over the 8 window neighbors (r2) of
    mask * spatial tap weight — the bilateral filter's normalization
    term."""
    xnumel = 256
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 16
    x1 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 128 * x1), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1  # mask * spatial weight for each neighbor
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_per_fused_div_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0... # NOTE below: parameter name kept as in original
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Bilateral message passing: reduce mask * Q * spatial weight over the
    8 window neighbors (r4), divide by the precomputed norm (in_ptr3), and
    scatter the result into its slot of the stride-9 concat buffer."""
    xnumel = 1024
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r4 = rindex
    x0 = xindex % 16
    x5 = xindex // 64
    x1 = xindex // 16 % 4
    x3 = xindex // 256
    x2 = xindex // 64 % 4
    x7 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r4 + 128 * x5), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * r4 + 128 * x1 + 512 * x3), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp3 = tl.load(in_ptr2 + r4, None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr3 + (x0 + 16 * x5), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3  # mask * Q * spatial weight
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp5, 0)
    tmp8 = tl.sum(tmp7, 1)[:, None]
    tmp10 = tmp8 / tmp9  # normalize by the neighbor-weight sum
    tl.store(out_ptr1 + (x2 + 9 * x7 + 576 * x3), tmp10, xmask)
@triton.jit
def triton_per_fused_div_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Second bilateral filter's message passing (same computation as
    kernel _4, on the second mask/weight set): reduce mask * Q * spatial
    weight over the 8 neighbors, normalize, and scatter into the stride-9
    concat buffer."""
    xnumel = 1024
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r4 = rindex
    x0 = xindex % 16
    x5 = xindex // 64
    x1 = xindex // 16 % 4
    x3 = xindex // 256
    x2 = xindex // 64 % 4
    x7 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r4 + 128 * x5), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * r4 + 128 * x1 + 512 * x3), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp3 = tl.load(in_ptr2 + r4, None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr3 + (x0 + 16 * x5), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3  # mask * Q * spatial weight
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp5, 0)
    tmp8 = tl.sum(tmp7, 1)[:, None]
    tmp10 = tmp8 / tmp9  # normalize by the neighbor-weight sum
    tl.store(out_ptr1 + (x2 + 9 * x7 + 576 * x3), tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_new_ones_6(out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Fill a 256-element buffer with 1.0 — the all-ones input used for
    the spatial filter's normalization convolution."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 1.0
    tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Spatial-filter normalization conv(Q) / conv(ones), scattered into
    its slot of the stride-9 concatenation buffer (one write every 9
    elements)."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 / tmp1
    tl.store(out_ptr0 + 9 * x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Transpose-copy from the stride-9 concatenation layout into a
    contiguous buffer (36 = 9 filter slots x 4 batches, 64 spatial/channel
    positions each)."""
    ynumel = 36
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 9  # filter slot within the concat
    y1 = yindex // 9  # batch index
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 9 * x2 + 576 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 64 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_mul_sum_9(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    """Weighted sum over the 9 stacked filter responses (r3) with the
    learned per-filter weights (in_ptr1) — the `Q * weightfiltering`
    followed by `.sum(dim=1)` step of the CRF update."""
    xnumel = 256
    rnumel = 9
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r3 = rindex
    x2 = xindex // 64  # batch index
    x4 = xindex % 64
    x1 = xindex // 16 % 4  # class channel -> selects the filter weight
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x4 + 64 * r3 + 576 * x2), rmask & xmask,
        other=0.0)
    tmp1 = tl.load(in_ptr1 + (x1 + 4 * r3), rmask & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(rmask & xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tl.store(out_ptr0 + x5, tmp6, xmask)
@triton.jit
def triton_poi_fused_diagonal_copy_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    """Copy the diagonal of the flattened 4x4 compatibility weight
    (flat stride 5 = row stride 4 + 1)."""
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sub_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Per-pixel channel max (over 4 classes) of
    in_ptr0 - (in_ptr1 - in_ptr2 * diag), where in_ptr3 holds the 4
    diagonal entries of the compatibility weight. The arithmetic matches
    CRFRNN's `U - (compat(Q) - Q * diag)` update, with the max feeding the
    next softmax — NOTE(review): in_ptr0/1/2 presumably are U, conv(Q) and
    Q; confirm against the call site."""
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16  # spatial position
    x1 = xindex // 16  # batch index
    x2 = xindex
    # Channel 0 operands and its diagonal entry.
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
    tmp2 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr3 + 0)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    # Channel 1.
    tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp9 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
    tmp10 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask)
    tmp11 = tl.load(in_ptr3 + 1)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
    # Channel 2.
    tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp18 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
    tmp19 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask)
    tmp20 = tl.load(in_ptr3 + 2)
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
    # Channel 3.
    tmp26 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp27 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
    tmp28 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask)
    tmp29 = tl.load(in_ptr3 + 3)
    tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
    tmp5 = tmp2 * tmp4
    tmp6 = tmp1 - tmp5
    tmp7 = tmp0 - tmp6
    tmp13 = tmp10 * tmp12
    tmp14 = tmp9 - tmp13
    tmp15 = tmp8 - tmp14
    tmp16 = triton_helpers.maximum(tmp7, tmp15)
    tmp22 = tmp19 * tmp21
    tmp23 = tmp18 - tmp22
    tmp24 = tmp17 - tmp23
    tmp25 = triton_helpers.maximum(tmp16, tmp24)
    tmp31 = tmp28 * tmp30
    tmp32 = tmp27 - tmp31
    tmp33 = tmp26 - tmp32
    tmp34 = triton_helpers.maximum(tmp25, tmp33)
    tl.store(out_ptr0 + x2, tmp34, xmask)
# Exponent pass of the stable softmax, in place:
#     in_out = exp((unary - (in_out - filtered * diag[c])) - rowmax)
# where rowmax (in_ptr3) was produced by triton_poi_fused__softmax_mul_sub_11.
# The result is the un-normalised softmax numerator for the next iteration.
@triton.jit
def triton_poi_fused__softmax_mul_sub_12(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_out_ptr0 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tmp2 * tmp3
    tmp5 = tmp1 - tmp4
    tmp6 = tmp0 - tmp5
    tmp8 = tmp6 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(in_out_ptr0 + x3, tmp9, xmask)
# Final update without the softmax:
#     in_out = unary - (in_out - filtered * diag[c])
# stored in place.  This produces the logits `call` returns (buf128).
@triton.jit
def triton_poi_fused_mul_sub_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_out_ptr0 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 * tmp3
    tmp5 = tmp1 - tmp4
    tmp6 = tmp0 - tmp5
    tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
    """TorchInductor-generated CRF-RNN forward pass.

    primals_1 is the unary-logit tensor and primals_2 the image; the
    remaining primals are the fixed filter weights packed by
    CRFRNNNew.forward.  The mean-field update (softmax -> bilateral +
    spatial message passing -> filter weighting -> 1x1 compatibility
    transform -> update) is fully unrolled: the same kernel sequence
    repeats with the same weights, only the Q estimate changes.  Returns
    the final logits followed by every intermediate kept for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_4, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_5, (1, 1, 1, 8, 1, 1), (8, 8, 8, 1, 1, 1))
    assert_size_stride(primals_6, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_7, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_8, (1, 1, 1, 8, 1, 1), (8, 8, 8, 1, 1, 1))
    assert_size_stride(primals_9, (4, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_10, (1, 9, 4, 1, 1), (36, 4, 1, 1, 1))
    assert_size_stride(primals_11, (4, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q0 = softmax(unary logits): max-subtract pass, then normalise.
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](primals_1, buf0, 256, XBLOCK
            =128, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        # Appearance (bilateral) terms depend only on the image, so the
        # Gaussian masks (buf4/buf9) and their normalisers (buf6/buf11)
        # are computed once and reused by every iteration.
        buf2 = extern_kernels.convolution(primals_2, primals_3, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
        del primals_3
        buf3 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
        buf7 = extern_kernels.convolution(primals_2, primals_6, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf7, (4, 32, 4, 4), (512, 16, 4, 1))
        del primals_6
        buf4 = reinterpret_tensor(buf2, (4, 4, 8, 4, 4), (512, 128, 16, 4,
            1), 0)
        del buf2
        buf9 = reinterpret_tensor(buf7, (4, 4, 8, 4, 4), (512, 128, 16, 4,
            1), 0)
        del buf7
        triton_poi_fused_div_exp_neg_pow_sub_2[grid(2048)](buf4, buf9,
            primals_2, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf6 = buf0
        del buf0
        triton_per_fused_mul_sum_3[grid(256)](buf4, primals_5, buf6, 256, 8,
            XBLOCK=8, num_warps=2, num_stages=1)
        # --- iteration 1: message passing on Q0 ---
        buf18 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 1, 144, 36, 9),
            torch.float32)
        buf15 = reinterpret_tensor(buf18, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf3, primals_5,
            buf6, buf15, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf3
        buf8 = extern_kernels.convolution(buf1, primals_7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf8, (4, 32, 4, 4), (512, 16, 4, 1))
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused_mul_sum_3[grid(256)](buf9, primals_8, buf11, 256,
            8, XBLOCK=8, num_warps=2, num_stages=1)
        buf16 = reinterpret_tensor(buf18, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf8, primals_8,
            buf11, buf16, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf8
        buf12 = extern_kernels.convolution(buf1, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
        # buf14 = conv(ones): the spatial filter's border normaliser; it is
        # constant, so it too is computed once and reused.
        buf13 = buf1
        del buf1
        triton_poi_fused_convolution_new_ones_6[grid(256)](buf13, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1))
        buf17 = reinterpret_tensor(buf18, (4, 1, 4, 4, 4), (576, 1, 144, 36,
            9), 8)
        triton_poi_fused_cat_7[grid(256)](buf12, buf14, buf17, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf19 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf18, buf19, 36, 64, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf15
        del buf16
        del buf17
        buf20 = buf12
        del buf12
        triton_per_fused_mul_sum_9[grid(256)](buf19, primals_10, buf20, 256,
            9, XBLOCK=1, num_warps=2, num_stages=1)
        # Compatibility transform (1x1 conv) + diagonal extracted once.
        buf21 = extern_kernels.convolution(buf20, primals_11, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1))
        buf22 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_diagonal_copy_10[grid(4)](primals_11, buf22, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
        triton_poi_fused__softmax_mul_sub_11[grid(64)](primals_1, buf21,
            buf20, buf22, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf24 = buf21
        del buf21
        triton_poi_fused__softmax_mul_sub_12[grid(256)](buf24, primals_1,
            buf20, buf22, buf23, 256, XBLOCK=256, num_warps=4, num_stages=1)
        # --- iteration 2 (identical sequence, new Q estimate) ---
        buf25 = buf13
        del buf13
        triton_poi_fused__softmax_1[grid(256)](buf24, buf25, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf26 = extern_kernels.convolution(buf25, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf26, (4, 32, 4, 4), (512, 16, 4, 1))
        buf34 = buf18
        del buf18
        buf31 = reinterpret_tensor(buf34, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf26, primals_5,
            buf6, buf31, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf26
        buf28 = extern_kernels.convolution(buf25, primals_7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf28, (4, 32, 4, 4), (512, 16, 4, 1))
        buf32 = reinterpret_tensor(buf34, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf28, primals_8,
            buf11, buf32, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf28
        buf30 = extern_kernels.convolution(buf25, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf30, (4, 4, 4, 4), (64, 16, 4, 1))
        buf33 = reinterpret_tensor(buf34, (4, 1, 4, 4, 4), (576, 1, 144, 36,
            9), 8)
        triton_poi_fused_cat_7[grid(256)](buf30, buf14, buf33, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf35 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf34, buf35, 36, 64, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf31
        del buf32
        del buf33
        buf36 = buf30
        del buf30
        triton_per_fused_mul_sum_9[grid(256)](buf35, primals_10, buf36, 256,
            9, XBLOCK=1, num_warps=2, num_stages=1)
        buf37 = extern_kernels.convolution(buf36, primals_11, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 4, 4, 4), (64, 16, 4, 1))
        buf38 = buf23
        del buf23
        triton_poi_fused__softmax_mul_sub_11[grid(64)](primals_1, buf37,
            buf36, buf22, buf38, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf39 = buf37
        del buf37
        triton_poi_fused__softmax_mul_sub_12[grid(256)](buf39, primals_1,
            buf36, buf22, buf38, 256, XBLOCK=256, num_warps=4, num_stages=1)
        # --- iteration 3 ---
        buf40 = buf24
        del buf24
        triton_poi_fused__softmax_1[grid(256)](buf39, buf40, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf41 = extern_kernels.convolution(buf40, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf41, (4, 32, 4, 4), (512, 16, 4, 1))
        buf49 = buf34
        del buf34
        buf46 = reinterpret_tensor(buf49, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf41, primals_5,
            buf6, buf46, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf41
        buf43 = extern_kernels.convolution(buf40, primals_7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf43, (4, 32, 4, 4), (512, 16, 4, 1))
        buf47 = reinterpret_tensor(buf49, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf43, primals_8,
            buf11, buf47, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf43
        buf45 = extern_kernels.convolution(buf40, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf45, (4, 4, 4, 4), (64, 16, 4, 1))
        buf48 = reinterpret_tensor(buf49, (4, 1, 4, 4, 4), (576, 1, 144, 36,
            9), 8)
        triton_poi_fused_cat_7[grid(256)](buf45, buf14, buf48, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf50 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf49, buf50, 36, 64, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf46
        del buf47
        del buf48
        buf51 = buf45
        del buf45
        triton_per_fused_mul_sum_9[grid(256)](buf50, primals_10, buf51, 256,
            9, XBLOCK=1, num_warps=2, num_stages=1)
        buf52 = extern_kernels.convolution(buf51, primals_11, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf52, (4, 4, 4, 4), (64, 16, 4, 1))
        buf53 = buf38
        del buf38
        triton_poi_fused__softmax_mul_sub_11[grid(64)](primals_1, buf52,
            buf51, buf22, buf53, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf54 = buf52
        del buf52
        triton_poi_fused__softmax_mul_sub_12[grid(256)](buf54, primals_1,
            buf51, buf22, buf53, 256, XBLOCK=256, num_warps=4, num_stages=1)
        # --- iteration 4 ---
        buf55 = buf39
        del buf39
        triton_poi_fused__softmax_1[grid(256)](buf54, buf55, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf56 = extern_kernels.convolution(buf55, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf56, (4, 32, 4, 4), (512, 16, 4, 1))
        buf64 = buf49
        del buf49
        buf61 = reinterpret_tensor(buf64, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf56, primals_5,
            buf6, buf61, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf56
        buf58 = extern_kernels.convolution(buf55, primals_7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf58, (4, 32, 4, 4), (512, 16, 4, 1))
        buf62 = reinterpret_tensor(buf64, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf58, primals_8,
            buf11, buf62, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf58
        buf60 = extern_kernels.convolution(buf55, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf60, (4, 4, 4, 4), (64, 16, 4, 1))
        buf63 = reinterpret_tensor(buf64, (4, 1, 4, 4, 4), (576, 1, 144, 36,
            9), 8)
        triton_poi_fused_cat_7[grid(256)](buf60, buf14, buf63, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf65 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf64, buf65, 36, 64, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf61
        del buf62
        del buf63
        buf66 = buf60
        del buf60
        triton_per_fused_mul_sum_9[grid(256)](buf65, primals_10, buf66, 256,
            9, XBLOCK=1, num_warps=2, num_stages=1)
        buf67 = extern_kernels.convolution(buf66, primals_11, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf67, (4, 4, 4, 4), (64, 16, 4, 1))
        buf68 = buf53
        del buf53
        triton_poi_fused__softmax_mul_sub_11[grid(64)](primals_1, buf67,
            buf66, buf22, buf68, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf69 = buf67
        del buf67
        triton_poi_fused__softmax_mul_sub_12[grid(256)](buf69, primals_1,
            buf66, buf22, buf68, 256, XBLOCK=256, num_warps=4, num_stages=1)
        # --- iteration 5 ---
        buf70 = buf54
        del buf54
        triton_poi_fused__softmax_1[grid(256)](buf69, buf70, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf71 = extern_kernels.convolution(buf70, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf71, (4, 32, 4, 4), (512, 16, 4, 1))
        buf79 = buf64
        del buf64
        buf76 = reinterpret_tensor(buf79, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf71, primals_5,
            buf6, buf76, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf71
        buf73 = extern_kernels.convolution(buf70, primals_7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf73, (4, 32, 4, 4), (512, 16, 4, 1))
        buf77 = reinterpret_tensor(buf79, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf73, primals_8,
            buf11, buf77, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf73
        buf75 = extern_kernels.convolution(buf70, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf75, (4, 4, 4, 4), (64, 16, 4, 1))
        buf78 = reinterpret_tensor(buf79, (4, 1, 4, 4, 4), (576, 1, 144, 36,
            9), 8)
        triton_poi_fused_cat_7[grid(256)](buf75, buf14, buf78, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf80 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf79, buf80, 36, 64, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf76
        del buf77
        del buf78
        buf81 = buf75
        del buf75
        triton_per_fused_mul_sum_9[grid(256)](buf80, primals_10, buf81, 256,
            9, XBLOCK=1, num_warps=2, num_stages=1)
        buf82 = extern_kernels.convolution(buf81, primals_11, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf82, (4, 4, 4, 4), (64, 16, 4, 1))
        buf83 = buf68
        del buf68
        triton_poi_fused__softmax_mul_sub_11[grid(64)](primals_1, buf82,
            buf81, buf22, buf83, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf84 = buf82
        del buf82
        triton_poi_fused__softmax_mul_sub_12[grid(256)](buf84, primals_1,
            buf81, buf22, buf83, 256, XBLOCK=256, num_warps=4, num_stages=1)
        # --- iteration 6 ---
        buf85 = buf69
        del buf69
        triton_poi_fused__softmax_1[grid(256)](buf84, buf85, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf86 = extern_kernels.convolution(buf85, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf86, (4, 32, 4, 4), (512, 16, 4, 1))
        buf94 = buf79
        del buf79
        buf91 = reinterpret_tensor(buf94, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf86, primals_5,
            buf6, buf91, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf86
        buf88 = extern_kernels.convolution(buf85, primals_7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf88, (4, 32, 4, 4), (512, 16, 4, 1))
        buf92 = reinterpret_tensor(buf94, (4, 4, 4, 4, 4), (576, 1, 144, 36,
            9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf88, primals_8,
            buf11, buf92, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf88
        buf90 = extern_kernels.convolution(buf85, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf90, (4, 4, 4, 4), (64, 16, 4, 1))
        buf93 = reinterpret_tensor(buf94, (4, 1, 4, 4, 4), (576, 1, 144, 36,
            9), 8)
        triton_poi_fused_cat_7[grid(256)](buf90, buf14, buf93, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf95 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf94, buf95, 36, 64, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf91
        del buf92
        del buf93
        buf96 = buf90
        del buf90
        triton_per_fused_mul_sum_9[grid(256)](buf95, primals_10, buf96, 256,
            9, XBLOCK=1, num_warps=2, num_stages=1)
        buf97 = extern_kernels.convolution(buf96, primals_11, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf97, (4, 4, 4, 4), (64, 16, 4, 1))
        buf98 = buf83
        del buf83
        triton_poi_fused__softmax_mul_sub_11[grid(64)](primals_1, buf97,
            buf96, buf22, buf98, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf99 = buf97
        del buf97
        triton_poi_fused__softmax_mul_sub_12[grid(256)](buf99, primals_1,
            buf96, buf22, buf98, 256, XBLOCK=256, num_warps=4, num_stages=1)
        # --- iteration 7 ---
        buf100 = buf84
        del buf84
        triton_poi_fused__softmax_1[grid(256)](buf99, buf100, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf101 = extern_kernels.convolution(buf100, primals_4, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf101, (4, 32, 4, 4), (512, 16, 4, 1))
        buf109 = buf94
        del buf94
        buf106 = reinterpret_tensor(buf109, (4, 4, 4, 4, 4), (576, 1, 144,
            36, 9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf101, primals_5,
            buf6, buf106, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf101
        buf103 = extern_kernels.convolution(buf100, primals_7, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf103, (4, 32, 4, 4), (512, 16, 4, 1))
        buf107 = reinterpret_tensor(buf109, (4, 4, 4, 4, 4), (576, 1, 144,
            36, 9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf103, primals_8,
            buf11, buf107, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf103
        buf105 = extern_kernels.convolution(buf100, primals_9, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf105, (4, 4, 4, 4), (64, 16, 4, 1))
        buf108 = reinterpret_tensor(buf109, (4, 1, 4, 4, 4), (576, 1, 144,
            36, 9), 8)
        triton_poi_fused_cat_7[grid(256)](buf105, buf14, buf108, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf110 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf109, buf110, 36, 64, XBLOCK
            =32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf106
        del buf107
        del buf108
        buf111 = buf105
        del buf105
        triton_per_fused_mul_sum_9[grid(256)](buf110, primals_10, buf111,
            256, 9, XBLOCK=1, num_warps=2, num_stages=1)
        buf112 = extern_kernels.convolution(buf111, primals_11, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf112, (4, 4, 4, 4), (64, 16, 4, 1))
        buf113 = buf98
        del buf98
        triton_poi_fused__softmax_mul_sub_11[grid(64)](primals_1, buf112,
            buf111, buf22, buf113, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf114 = buf112
        del buf112
        triton_poi_fused__softmax_mul_sub_12[grid(256)](buf114, primals_1,
            buf111, buf22, buf113, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf113
        # --- iteration 8 (final): update only, no trailing softmax ---
        buf115 = buf99
        del buf99
        triton_poi_fused__softmax_1[grid(256)](buf114, buf115, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del buf114
        buf116 = extern_kernels.convolution(buf115, primals_4, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf116, (4, 32, 4, 4), (512, 16, 4, 1))
        buf124 = buf109
        del buf109
        buf121 = reinterpret_tensor(buf124, (4, 4, 4, 4, 4), (576, 1, 144,
            36, 9), 0)
        triton_per_fused_div_mul_sum_4[grid(1024)](buf4, buf116, primals_5,
            buf6, buf121, 1024, 8, XBLOCK=8, num_warps=2, num_stages=1)
        del buf116
        buf118 = extern_kernels.convolution(buf115, primals_7, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf118, (4, 32, 4, 4), (512, 16, 4, 1))
        buf122 = reinterpret_tensor(buf124, (4, 4, 4, 4, 4), (576, 1, 144,
            36, 9), 4)
        triton_per_fused_div_mul_sum_5[grid(1024)](buf9, buf118, primals_8,
            buf11, buf122, 1024, 8, XBLOCK=32, num_warps=2, num_stages=1)
        del buf118
        buf120 = extern_kernels.convolution(buf115, primals_9, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf120, (4, 4, 4, 4), (64, 16, 4, 1))
        buf123 = reinterpret_tensor(buf124, (4, 1, 4, 4, 4), (576, 1, 144,
            36, 9), 8)
        triton_poi_fused_cat_7[grid(256)](buf120, buf14, buf123, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf125 = empty_strided_cuda((4, 9, 4, 4, 4), (576, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_8[grid(36, 64)](buf124, buf125, 36, 64, XBLOCK
            =32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf121
        del buf122
        del buf123
        del buf124
        buf126 = buf120
        del buf120
        triton_per_fused_mul_sum_9[grid(256)](buf125, primals_10, buf126,
            256, 9, XBLOCK=1, num_warps=2, num_stages=1)
        buf127 = extern_kernels.convolution(buf126, primals_11, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf127, (4, 4, 4, 4), (64, 16, 4, 1))
        buf128 = buf127
        del buf127
        triton_poi_fused_mul_sub_13[grid(256)](buf128, primals_1, buf126,
            buf22, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
    # buf128: final logits; everything after it is saved for backward.
    return (buf128, primals_4, primals_5, primals_7, primals_8, primals_9,
        primals_10, primals_11, reinterpret_tensor(buf4, (4, 4, 1, 8, 4, 4),
        (512, 128, 128, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 1, 4,
        4), (64, 16, 16, 4, 1), 0), reinterpret_tensor(buf9, (4, 4, 1, 8, 4,
        4), (512, 128, 128, 16, 4, 1), 0), reinterpret_tensor(buf11, (4, 4,
        1, 4, 4), (64, 16, 16, 4, 1), 0), buf14, buf19, buf20, buf22, buf25,
        buf35, buf36, buf40, buf50, buf51, buf55, buf65, buf66, buf70,
        buf80, buf81, buf85, buf95, buf96, buf100, buf110, buf111, buf115,
        buf125, buf126)
def make_onehot_kernel(kernel_size, index):
    """
    Build a square 2D one-hot kernel of shape (1, 1, kernel_size, kernel_size).

    Every entry is zero except the one at flat position ``index``, which is 1.
    """
    flat = torch.zeros(kernel_size * kernel_size)
    flat[index] = 1
    return flat.reshape(1, 1, kernel_size, kernel_size)
def make_spatial_kernel(kernel_size, bandwidth, isreshape=True):
    """
    Build a square 2D Gaussian smoothness kernel.

    Each tap holds exp(-||pj - pi||^2 / (2 * bandwidth^2)) for the offset
    between pixel pj and the centre pixel pi; the centre tap itself is
    zeroed (a pixel sends no message to itself).

    When ``isreshape`` is true the kernel is returned shaped
    (1, 1, kernel_size, kernel_size) ready for nn.Conv2d, otherwise as a
    plain (kernel_size, kernel_size) matrix.
    """
    assert bandwidth > 0, 'bandwidth of kernel must be > 0'
    assert kernel_size % 2 != 0, 'kernel must be odd'
    half = (kernel_size - 1) // 2
    # Horizontal offsets per row; transpose gives the vertical offsets.
    xs = torch.linspace(-half, half, steps=kernel_size).expand(kernel_size,
        kernel_size)
    ys = xs.clone().t()
    kernel = torch.exp(-(xs ** 2 + ys ** 2) / (2 * bandwidth ** 2))
    kernel[half, half] = 0
    if not isreshape:
        return kernel
    return kernel.view(1, 1, kernel_size, kernel_size)
class GaussianMask(nn.Module):
    """
    Decompose the Gaussian (second) part of the appearance kernel into a CNN.

    For every neighbour offset j of pixel i it yields
    exp(-(I(j) - I(i))**2 / (2 * bandwidth**2)); all n_kernels = k*k - 1
    neighbour maps are produced at once by a fixed one-hot grouped conv.
    With ``iskernel=False`` only the shifted copies I(j) are returned.
    """

    def __init__(self, in_channels, kernel_size, bandwidth, iskernel=True):
        super(GaussianMask, self).__init__()
        assert bandwidth > 0, 'bandwidth of kernel must be > 0'
        assert kernel_size % 2 != 0, 'kernel must be odd'
        self.bandwidth = bandwidth
        self.iskernel = iskernel
        self.n_kernels = kernel_size ** 2 - 1
        weight = self._make_kernel_weight(in_channels, kernel_size,
            self.n_kernels)
        self.conv = nn.Conv2d(in_channels, in_channels * self.n_kernels,
            kernel_size, stride=1, padding=kernel_size // 2,
            groups=in_channels, bias=False)
        # Fixed gather weights: freeze first, then copy them in.
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight.view_as(self.conv.weight))

    def _make_kernel_weight(self, in_channels, kernel_size, n_kernels):
        # One one-hot kernel per neighbour position, skipping the centre tap.
        weight = torch.zeros(in_channels, n_kernels, kernel_size, kernel_size)
        for i in range(n_kernels):
            flat_index = i if i < n_kernels // 2 else i + 1
            weight[:, i, :] = make_onehot_kernel(kernel_size, flat_index)
        return weight

    def forward(self, X):
        batch_size, in_channels, H, W = X.shape
        # Xj[b, c, k] is X shifted by neighbour offset k.
        Xj = self.conv(X).view(batch_size, in_channels, self.n_kernels, H, W)
        if not self.iskernel:
            return Xj
        Xi = X.unsqueeze(dim=2)
        sq_diff = (Xj - Xi) ** 2 / (2 * self.bandwidth ** 2)
        return torch.exp(-sq_diff)
class SpatialFilter(nn.Module):
    """
    Decompose the spatial (smoothness) kernel into CNN blocks.
    refer: https://arxiv.org/pdf/1210.5644.pdf
    """

    def __init__(self, n_classes, kernel_size, theta_gamma):
        super(SpatialFilter, self).__init__()
        weight = make_spatial_kernel(kernel_size, theta_gamma)
        self.conv = nn.Conv2d(n_classes, n_classes, kernel_size, stride=1,
            padding=kernel_size // 2, groups=n_classes, bias=False)
        # Fixed Gaussian weights: freeze first, then copy them in.
        self.conv.weight.requires_grad = False
        self.conv.weight.copy_(weight)

    def forward(self, Q):
        # Divide by the filter applied to an all-ones map so that border
        # pixels, which see fewer neighbours, are normalised correctly.
        ones = Q.new_ones(*Q.shape, requires_grad=False)
        return self.conv(Q) / self.conv(ones)
class BilateralFilter(nn.Module):
    """
    Decompose the bilateral (appearance) kernel into CNN blocks,
    exploiting exp(-a-b) = exp(-a)*exp(-b) to split the spatial and
    colour Gaussian terms.
    """

    def __init__(self, in_channels, n_classes, kernel_size, theta_alpha,
        theta_beta):
        super(BilateralFilter, self).__init__()
        spatial = make_spatial_kernel(kernel_size, theta_alpha,
            isreshape=False)
        # Keep only the non-centre taps, shaped to broadcast over
        # (batch, channel, class, neighbour, H, W).
        self.spatial_weight = Parameter(spatial[spatial > 0].view(1, 1, 1,
            -1, 1, 1), requires_grad=False)
        self.gauss_mask_I = GaussianMask(in_channels, kernel_size, theta_beta)
        self.guass_mask_Q = GaussianMask(n_classes, kernel_size, 1,
            iskernel=False)

    def forward(self, Q, I):
        Ij = self.gauss_mask_I(I)
        Qj = self.guass_mask_Q(Q)
        # Weight each shifted Q by the image-dependent and spatial terms,
        # then sum over the neighbour dimension.
        weighted = Ij.unsqueeze(dim=2) * Qj.unsqueeze(dim=1)
        weighted = weighted * self.spatial_weight
        Qtilde = weighted.sum(dim=3)
        norm_weight = (Ij * self.spatial_weight.squeeze(dim=2)).sum(dim=2)
        return Qtilde / norm_weight.unsqueeze(dim=2)
class MessagePassing(nn.Module):
    """
    Combine bilateral (appearance) filters and spatial filters into one
    message-passing step; their outputs are concatenated along dim=1.
    """

    def __init__(self, in_channels, n_classes, kernel_size=[3], theta_alpha
        =[2.0], theta_beta=[2.0], theta_gamma=[2.0]):
        super(MessagePassing, self).__init__()
        assert len(theta_alpha) == len(theta_beta
            ), 'theta_alpha and theta_beta have different lengths'
        self.n_bilaterals, self.n_spatials = len(theta_alpha), len(theta_gamma)
        for idx, (alpha, beta) in enumerate(zip(theta_alpha, theta_beta)):
            self.add_module('bilateral{}'.format(idx), BilateralFilter(
                in_channels, n_classes, kernel_size[idx], alpha, beta))
        for idx, gamma in enumerate(theta_gamma):
            self.add_module('spatial{}'.format(idx), SpatialFilter(
                n_classes, kernel_size[idx], gamma))

    def _get_child(self, child_name):
        return getattr(self, child_name)

    def forward(self, Q, I):
        # Bilateral outputs first, then spatial outputs (unsqueezed to the
        # same rank), exactly in registration order.
        filtered = [self._get_child('bilateral{}'.format(i))(Q, I)
            for i in range(self.n_bilaterals)]
        filtered += [self._get_child('spatial{}'.format(i))(Q).unsqueeze(dim=1)
            for i in range(self.n_spatials)]
        return torch.cat(filtered, dim=1)
class CRFRNNNew(nn.Module):
    """ Break meanfields down as CNN and do iteration """

    def __init__(self, n_iter, in_channels, n_classes, kernel_size=[3, 3],
        theta_alpha=[1.5, 2.5], theta_beta=[1.5, 2.5], theta_gamma=[1.5]):
        super(CRFRNNNew, self).__init__()
        self.n_iter = n_iter
        self.n_classes = n_classes
        # One filter response per (bilateral x channel) plus one per
        # spatial kernel.
        n_filters = in_channels * len(theta_alpha) + len(theta_gamma)
        self.softmax = nn.Softmax2d()
        self.messagepassing = MessagePassing(in_channels, n_classes,
            kernel_size=kernel_size, theta_alpha=theta_alpha,
            theta_beta=theta_beta, theta_gamma=theta_gamma)
        self.weightfiltering = Parameter(torch.rand(1, n_filters,
            n_classes, 1, 1))
        self.compatibilitytransf = nn.Conv2d(n_classes, n_classes,
            kernel_size=1, stride=1, padding=0, bias=False)
        self._weight_initial()
        self.train_step = 0

    def _weight_initial(self):
        init.kaiming_normal_(self.weightfiltering)
        init.kaiming_normal_(self.compatibilitytransf.weight)

    def forward(self, input_0, input_1):
        mp = self.messagepassing
        # Parameter packing order must match the inductor-generated `call`.
        params = [
            input_0,
            input_1,
            mp.bilateral0.gauss_mask_I.conv.weight,
            mp.bilateral0.guass_mask_Q.conv.weight,
            mp.bilateral0.spatial_weight,
            mp.bilateral1.gauss_mask_I.conv.weight,
            mp.bilateral1.guass_mask_Q.conv.weight,
            mp.bilateral1.spatial_weight,
            mp.spatial0.conv.weight,
            self.weightfiltering,
            self.compatibilitytransf.weight,
        ]
        # call() returns the final logits first, then backward intermediates.
        return call(params)[0]
|
Molly6/segmentation_shengteng2021
|
CRFRNN
| false
| 8,617
|
[
"Apache-2.0"
] | 21
|
33dfefa80193586f504069793d9e141944549e99
|
https://github.com/Molly6/segmentation_shengteng2021/tree/33dfefa80193586f504069793d9e141944549e99
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small CNN classifier: four (conv3x3 -> ReLU -> 2x2 max-pool) stages
    followed by two fully connected layers.

    NOTE(review): fc1 expects a 256*14*14 feature map, i.e. a 224x224 input
    (224 -> 112 -> 56 -> 28 -> 14); other sizes only work when the flattened
    batch happens to be divisible by 256*14*14 -- confirm intended input size.
    """

    def __init__(self, in_channels=3, out_features=2):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=32,
            kernel_size=(3, 3), padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64,
            kernel_size=(3, 3), padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128,
            kernel_size=(3, 3), padding=1)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256,
            kernel_size=(3, 3), padding=1)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(256 * 14 * 14, 128)
        self.fc2 = nn.Linear(128, out_features=out_features)

    def forward(self, x):
        # Four conv/ReLU/pool stages, each halving the spatial size.
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = self.pool3(F.relu(self.conv3(x)))
        x = self.pool4(F.relu(self.conv4(x)))
        # Flatten to (-1, 256*14*14) exactly as the classifier head expects.
        x = x.view(-1, 256 * 14 * 14)
        x = F.relu(self.fc1(x))
        return self.fc2(x)
def get_inputs():
    """Sample forward inputs: one random image batch of shape (4, 3, 121, 121)."""
    shape = (4, 3, 121, 121)
    return [torch.rand(*shape)]
def get_init_inputs():
    """Constructor arguments: no positionals and no keyword arguments."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout transform for the conv1 weight (32, 3, 3, 3): reads the
    # contiguous element at x2 + 9*y3 and writes it with the input-channel
    # axis innermost (y0 + 3*x2 + 27*y1), i.e. channels-last strides.
    ynumel = 96
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout transform for the (4, 3, 121, 121) input image: moves the
    # 3-channel axis innermost (channels-last) so the extern convolution
    # can consume it; 14641 == 121*121 spatial positions per (n, c) slice.
    ynumel = 12
    xnumel = 14641
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 14641 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 43923 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout transform for the conv2 weight (64, 32, 3, 3): input-channel
    # axis (size 32) moved innermost; grid covers 2048 (o*i) rows x 9 taps.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout transform for the conv3 weight (128, 64, 3, 3): input-channel
    # axis (size 64) moved innermost.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout transform for the conv4 weight (256, 128, 3, 3): input-channel
    # axis (size 128) moved innermost.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused conv1 epilogue, in place: add the per-channel bias (in_ptr0,
    # indexed by channel = xindex % 32) then ReLU (max with 0).
    xnumel = 1874048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool over the channels-last 32-channel map: loads the
    # four window taps, writes the max to out_ptr0 and an int8 code (0..3)
    # identifying the winning tap to out_ptr1.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32 % 60
    x2 = xindex // 1920 % 60
    x3 = xindex // 115200
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp3 = tl.load(in_ptr0 + (3872 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp5 = tl.load(in_ptr0 + (3904 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax bookkeeping: later taps win ties, matching the max chain above.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x4, tmp6, None)
    tl.store(out_ptr1 + x4, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused conv2 epilogue, in place: per-channel bias add (channel =
    # xindex % 64) followed by ReLU.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool over the channels-last 64-channel map (60x60 ->
    # 30x30): max value to out_ptr0, int8 winning-tap code to out_ptr1.
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x1 = xindex // 64 % 30
    x2 = xindex // 1920
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7680 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (3840 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (3904 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused conv3 epilogue, in place: per-channel bias add (channel =
    # xindex % 128) followed by ReLU.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool over the channels-last 128-channel map (30x30 ->
    # 15x15): max value to out_ptr0, int8 winning-tap code to out_ptr1.
    xnumel = 115200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 128
    x1 = xindex // 128 % 15
    x2 = xindex // 1920
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 7680 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 7680 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (3840 + x0 + 256 * x1 + 7680 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (3968 + x0 + 256 * x1 + 7680 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused conv4 epilogue, in place: per-channel bias add (channel =
    # xindex % 256) followed by ReLU.
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Final 2x2 stride-2 max pool (15x15 -> 7x7, 256 channels).  Writes the
    # int8 winning-tap codes in channels-last layout (out_ptr0) and the max
    # values in contiguous NCHW layout (out_ptr1: y5 + 49*x3 + 12544*y2),
    # ready for the following matmul.
    ynumel = 196
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x3 = xindex
    y0 = yindex % 7
    y1 = yindex // 7 % 7
    y2 = yindex // 49
    y4 = yindex
    y5 = yindex % 49
    tmp0 = tl.load(in_ptr0 + (x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (256 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (3840 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (4096 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1, 1], 1, tl.int8)
    tmp4 = tl.full([1, 1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1, 1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1, 1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + (x3 + 256 * y4), tmp15, xmask & ymask)
    tl.store(out_ptr1 + (y5 + 49 * x3 + 12544 * y2), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # fc1 epilogue, in place: add the 128-element bias vector then ReLU.
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
    # Generated forward graph for NetNew: repack conv weights and the input
    # image into channels-last buffers, then run four conv -> bias+ReLU ->
    # 2x2 max-pool stages and two linear layers.  Returns the logits plus
    # every intermediate needed for autograd.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 3, 121, 121), (43923, 14641, 121, 1))
    assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (128, 50176), (50176, 1))
    assert_size_stride(primals_11, (128,), (1,))
    assert_size_stride(primals_12, (2, 128), (128, 1))
    assert_size_stride(primals_13, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Repack the four conv weights and the input into channels-last
        # layouts (kernels 0-4).
        buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 121, 121), (43923, 1, 363, 3),
            torch.float32)
        triton_poi_fused_1[grid(12, 14641)](primals_3, buf1, 12, 14641,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
            float32)
        triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
            .float32)
        triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_4[grid(32768, 9)](primals_8, buf4, 32768, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        # Stage 1: conv1 + bias/ReLU + 2x2 max-pool (121 -> 60).
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 32, 121, 121), (468512, 1, 3872, 32))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_5[grid(1874048)](buf6, primals_2,
            1874048, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf7 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32),
            torch.float32)
        buf8 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_6[grid(460800)](buf6, buf7,
            buf8, 460800, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 2: conv2 + bias/ReLU + pool (60 -> 30).
        buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 64, 60, 60), (230400, 1, 3840, 64))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_7[grid(921600)](buf10, primals_5,
            921600, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf11 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
            torch.float32)
        buf12 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf10,
            buf11, buf12, 230400, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 3: conv3 + bias/ReLU + pool (30 -> 15).
        buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 128, 30, 30), (115200, 1, 3840, 128))
        buf14 = buf13
        del buf13
        triton_poi_fused_convolution_relu_9[grid(460800)](buf14, primals_7,
            460800, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf15 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128),
            torch.float32)
        buf16 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_10[grid(115200)](buf14,
            buf15, buf16, 115200, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 4: conv4 + bias/ReLU + pool (15 -> 7); pool also emits the
        # pooled values in contiguous layout (buf20) for the matmul below.
        buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 256, 15, 15), (57600, 1, 3840, 256))
        buf18 = buf17
        del buf17
        triton_poi_fused_convolution_relu_11[grid(230400)](buf18, primals_9,
            230400, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf19 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256),
            torch.int8)
        buf20 = empty_strided_cuda((4, 256, 7, 7), (12544, 49, 7, 1), torch
            .float32)
        triton_poi_fused_max_pool2d_with_indices_12[grid(196, 256)](buf18,
            buf19, buf20, 196, 256, XBLOCK=256, YBLOCK=2, num_warps=4,
            num_stages=1)
        # Classifier head: all 4*12544 = 50176 pooled features viewed as a
        # single row (see NetNew's view), then fc1 + ReLU and fc2.
        buf21 = empty_strided_cuda((1, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf20, (1, 50176), (0, 1), 0),
            reinterpret_tensor(primals_10, (50176, 128), (1, 50176), 0),
            out=buf21)
        buf22 = buf21
        del buf21
        triton_poi_fused_relu_13[grid(128)](buf22, primals_11, 128, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_11
        buf23 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(
            primals_12, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf23)
        del primals_13
    return (buf23, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
        buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor
        (buf20, (1, 50176), (50176, 1), 0), buf22, primals_12, primals_10)
class NetNew(nn.Module):
    """Triton-compiled version of Net: identical modules and parameters, but
    forward dispatches to the generated `call` graph above instead of eager
    PyTorch ops."""

    def __init__(self, in_channels=3, out_features=2):
        super(NetNew, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=32,
            kernel_size=(3, 3), padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size
            =(3, 3), padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128,
            kernel_size=(3, 3), padding=1)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256,
            kernel_size=(3, 3), padding=1)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(256 * 14 * 14, 128)
        self.fc2 = nn.Linear(128, out_features=out_features)

    def forward(self, input_0):
        # Map module parameters to the positional slots `call` expects;
        # output[0] is the logits tensor (buf23).
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.fc1.weight
        primals_11 = self.fc1.bias
        primals_12 = self.fc2.weight
        primals_13 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
|
Nicolik/SimpleCNNClassifier
|
Net
| false
| 8,618
|
[
"MIT"
] | 11
|
e5cd37fbde90f4096183658abe3f8836be92a8f2
|
https://github.com/Nicolik/SimpleCNNClassifier/tree/e5cd37fbde90f4096183658abe3f8836be92a8f2
|
CELoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CELoss(nn.Module):
    """Cross entropy against dense (soft) targets:
    -mean(sum(y_true * log(softmax(y_pred, dim=1)), dim=1))."""

    def __init__(self):
        super(CELoss, self).__init__()

    def forward(self, y_pred, y_true):
        # Log of the class probabilities along the channel dimension.
        log_probs = torch.log(F.softmax(y_pred, dim=1))
        # Per-position cross entropy, averaged over all positions, negated.
        per_position = torch.sum(y_true * log_probs, dim=1)
        return -torch.mean(per_position)
def get_inputs():
    """Sample forward inputs: two random tensors of shape (4, 4, 4, 4)."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape), torch.rand(*shape)]
def get_init_inputs():
    """Constructor arguments: no positionals and no keyword arguments."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Numerically stable softmax numerator over the class axis (size 4,
    # stride 16 in the (4, 4, 4, 4) input): load the element and its four
    # class-column entries, subtract the column max, store exp(x - max).
    # Normalization happens in the reduction kernel below.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused__softmax_log_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program reduction finishing the CE loss: normalize the four
    # exp values per position by their sum (softmax), take log, multiply by
    # the matching target entries (in_ptr0), sum the 4-class dot products
    # over all 64 positions, divide by 64 and negate; scalar result goes to
    # in_out_ptr0[0].
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp2 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp4 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp6 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp11 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp16 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp21 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    # Denominator of the softmax for this position.
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp1 / tmp7
    tmp9 = tl_math.log(tmp8)
    tmp10 = tmp0 * tmp9
    tmp12 = tmp2 / tmp7
    tmp13 = tl_math.log(tmp12)
    tmp14 = tmp11 * tmp13
    tmp15 = tmp10 + tmp14
    tmp17 = tmp4 / tmp7
    tmp18 = tl_math.log(tmp17)
    tmp19 = tmp16 * tmp18
    tmp20 = tmp15 + tmp19
    tmp22 = tmp6 / tmp7
    tmp23 = tl_math.log(tmp22)
    tmp24 = tmp21 * tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp28 = tl.sum(tmp26, 1)[:, None]
    tmp29 = 64.0
    tmp30 = tmp28 / tmp29
    tmp31 = -tmp30
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None)
def call(args):
    # Driver for CELossNew: kernel 0 materializes the stable softmax
    # numerators of the predictions, kernel 1 reduces them against the
    # targets into the scalar loss.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused__softmax_log_mean_mul_neg_sum_1[grid(1)](buf2,
            arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf0
    return buf2,
class CELossNew(nn.Module):
    """Triton-compiled equivalent of CELoss:
    -mean(sum(y_true * log(softmax(y_pred, dim=1)), dim=1))."""

    def __init__(self):
        super(CELossNew, self).__init__()

    def forward(self, input_0, input_1):
        # input_0: predictions, input_1: dense targets; the generated
        # `call` returns the scalar loss as its only output.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
PARMAGroup/UNet-Instance-Cell-Segmentation
|
CELoss
| false
| 8,620
|
[
"MIT"
] | 30
|
79655a2c5781d2e20c7d5760f631fbb0be392292
|
https://github.com/PARMAGroup/UNet-Instance-Cell-Segmentation/tree/79655a2c5781d2e20c7d5760f631fbb0be392292
|
PositionalEncoder
|
import math
import torch
class PositionalEncoder(torch.nn.Module):
    """Fourier-feature positional encoding: each coordinate is multiplied by
    `num_bands` log-spaced frequencies and mapped through sin and cos; the
    flattened features are left-padded with zeros up to `feat_size`."""

    def __init__(self, max_freq, feat_size, dimensionality, base=2):
        super().__init__()
        self.max_freq = max_freq
        self.dimensionality = dimensionality
        # Number of frequency bands that fit in feat_size once each of the
        # `dimensionality` coordinates gets a sin and a cos feature.
        self.num_bands = math.floor(feat_size / dimensionality / 2)
        self.base = base
        # Left-pad the flattened features to exactly feat_size columns.
        pad = feat_size - self.num_bands * 2 * dimensionality
        self.zero_pad = torch.nn.ZeroPad2d((pad, 0, 0, 0))

    def forward(self, x):
        # Scale coordinates down; presumably they arrive in a ~[0, 100]
        # range -- confirm against the caller.
        x = x / 100
        x = x.unsqueeze(-1)
        device = x.device
        dtype = x.dtype
        # Frequencies log-spaced from base**0 up to max_freq / 2.
        scales = torch.logspace(0.0, math.log(self.max_freq / 2) / math.log
            (self.base), self.num_bands, base=self.base, device=device,
            dtype=dtype)
        # Broadcast scales over all leading axes of x.
        # NOTE: starred expressions inside subscripts require Python 3.11+.
        scales = scales[*((None,) * (len(x.shape) - 1)), Ellipsis]
        x = x * scales * math.pi
        x = torch.cat([x.sin(), x.cos()], dim=-1)
        x = x.flatten(1)
        enc = self.zero_pad(x)
        return enc
def get_inputs():
    """Sample forward input: one random tensor of shape (4, 4, 4, 4)."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape)]
def get_init_inputs():
    """Constructor arguments: no positionals, keyword config for the encoder."""
    kwargs = {'max_freq': 4, 'feat_size': 4, 'dimensionality': 4}
    return [[], kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fills the (4, 4) output of the degenerate encoder (num_bands == 0):
    # the nan branch corresponds to reading the empty sin/cos feature block
    # and is never taken, since -4 + x0 < 0 for every x0 in [0, 4); all 16
    # elements therefore get the 0.0 padding value.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = -4 + x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = float('nan')
    tmp4 = tl.full(tmp3.shape, 0.0, tmp3.dtype)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
    # Driver for PositionalEncoderNew with num_bands == 0: the encoding
    # degenerates to the (4, 4) left-padding block.  The input tensor is
    # shape-checked but its values are never read -- the output depends
    # only on the configuration baked into the kernel.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(16)](buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
    return buf0,
class PositionalEncoderNew(torch.nn.Module):
    """Triton-compiled version of PositionalEncoder, specialized to the
    traced configuration (max_freq=4, feat_size=4, dimensionality=4, i.e.
    num_bands == 0)."""

    def __init__(self, max_freq, feat_size, dimensionality, base=2):
        super().__init__()
        self.max_freq = max_freq
        self.dimensionality = dimensionality
        self.num_bands = math.floor(feat_size / dimensionality / 2)
        self.base = base
        pad = feat_size - self.num_bands * 2 * dimensionality
        self.zero_pad = torch.nn.ZeroPad2d((pad, 0, 0, 0))

    def forward(self, input_0):
        # Dispatch to the generated graph; output[0] is the encoding.
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
PRBonn/contrastive_association
|
PositionalEncoder
| false
| 8,622
|
[
"MIT"
] | 19
|
649693494197c8d3948252daee6767b66a89c868
|
https://github.com/PRBonn/contrastive_association/tree/649693494197c8d3948252daee6767b66a89c868
|
WrapperKLDiv
|
import torch
from torch import Tensor
from torch import nn
class WrapperKLDiv(nn.Module):
    """Wrapper for KL-Divergence for easy argument passing."""

    def __init__(self, reduction: 'str'='mean') ->None:
        """Constructor.

        Args:
            reduction (str, optional): One of 'none','batchmean','sum', 'mean'.
                Defaults to 'mean'.
        """
        super(WrapperKLDiv, self).__init__()
        self.reduction = reduction

    def forward(self, set1: 'Tensor', set2: 'Tensor') ->Tensor:
        """Computes the KL-Divergence.

        Args:
            set1 (Tensor): Input tensor of arbitrary shape.
            set2 (Tensor): Tensor of the same shape as input.

        Returns:
            Tensor: Scalar by default. if reduction = 'none', then same
                shape as input.
        """
        reduce_mode = self.reduction
        return nn.functional.kl_div(set1, set2, reduction=reduce_mode)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    # Single-program reduction over all 256 elements: with t0 = in_ptr0 and
    # t1 = in_ptr1, computes t0*log(t0) (0 -> 0, NaN propagated: xlogy
    # semantics) minus t0*t1 elementwise, sums, divides by 256 ('mean'
    # reduction) and writes the scalar to in_out_ptr0[0].
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp9 = tl.load(in_ptr1 + r0, None)
    tmp1 = libdevice.isnan(tmp0).to(tl.int1)
    tmp2 = 0.0
    tmp3 = tmp0 == tmp2
    tmp4 = tl_math.log(tmp0)
    tmp5 = tmp0 * tmp4
    tmp6 = tl.where(tmp3, tmp2, tmp5)
    tmp7 = float('nan')
    tmp8 = tl.where(tmp1, tmp7, tmp6)
    tmp10 = tmp0 * tmp9
    tmp11 = tmp8 - tmp10
    tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
    tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
    tmp15 = 256.0
    tmp16 = tmp14 / tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
    # Driver for WrapperKLDivNew: a single fused reduction kernel produces
    # the scalar 'mean'-reduced KL divergence.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_mul_sub_xlogy_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class WrapperKLDivNew(nn.Module):
    """Triton-compiled version of WrapperKLDiv; the generated graph is
    specialized to the traced 'mean' reduction."""

    def __init__(self, reduction: 'str'='mean') ->None:
        """Constructor.

        Args:
            reduction (str, optional): One of 'none','batchmean','sum', 'mean'.
                Defaults to 'mean'.
        """
        super(WrapperKLDivNew, self).__init__()
        self.reduction = reduction

    def forward(self, input_0, input_1):
        # Dispatch to the generated graph; output[0] is the scalar loss.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
PaccMann/paccmann_datasets
|
WrapperKLDiv
| false
| 8,623
|
[
"MIT"
] | 14
|
0cb0cee349ffab8e227f09f7df0a8bca6a71f22e
|
https://github.com/PaccMann/paccmann_datasets/tree/0cb0cee349ffab8e227f09f7df0a8bca6a71f22e
|
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
    """Negated smoothed Dice coefficient between sigmoid(y_pred) and y_true."""

    def __init__(self, smooth=1):
        super(DiceLoss, self).__init__()
        self.smooth = smooth

    def dice_coef(self, y_pred, y_true):
        """Soft Dice coefficient over the flattened tensors."""
        probs = torch.sigmoid(y_pred).view(-1)
        truth = y_true.view(-1)
        overlap = (truth * probs).sum()
        denom = truth.sum() + probs.sum() + self.smooth
        return (2.0 * overlap + self.smooth) / denom

    def forward(self, y_pred, y_true):
        # Negate so that minimizing the loss maximizes overlap.
        return -self.dice_coef(y_pred, y_true)
def get_inputs():
    """Sample forward inputs: two random tensors of shape (4, 4, 4, 4)."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape), torch.rand(*shape)]
def get_init_inputs():
    """Constructor arguments: no positionals and no keyword arguments."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    # Single-program reduction over all 256 elements.  With t = in_ptr0
    # (targets, per the call site) and p = sigmoid(in_ptr1) (predicted
    # probabilities), computes -(2*sum(t*p) + 1) / (sum(t) + sum(p) + 1):
    # the negated smoothed Dice coefficient; scalar goes to in_out_ptr0[0].
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
    tmp10 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 2.0
    tmp14 = tmp6 * tmp13
    tmp15 = 1.0
    tmp16 = tmp14 + tmp15
    tmp17 = tmp9 + tmp12
    tmp18 = tmp17 + tmp15
    tmp19 = tmp16 / tmp18
    tmp20 = -tmp19
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
    # Driver for DiceLossNew: a single fused reduction kernel produces the
    # scalar loss.  Note the argument swap -- arg1_1 (y_true) is passed as
    # the kernel's in_ptr0 and arg0_1 (y_pred) as in_ptr1.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_mul_neg_sum_0[grid(1)](buf3, arg1_1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf3,
class DiceLossNew(nn.Module):
    """Triton-compiled version of DiceLoss.  forward dispatches to the
    generated graph; dice_coef is kept as the eager reference."""

    def __init__(self, smooth=1):
        super(DiceLossNew, self).__init__()
        self.smooth = smooth

    def dice_coef(self, y_pred, y_true):
        # Eager smoothed Dice coefficient (not used by the compiled forward).
        pred_probs = torch.sigmoid(y_pred)
        y_true_f = y_true.view(-1)
        y_pred_f = pred_probs.view(-1)
        intersection = torch.sum(y_true_f * y_pred_f)
        return (2.0 * intersection + self.smooth) / (torch.sum(y_true_f) +
            torch.sum(y_pred_f) + self.smooth)

    def forward(self, input_0, input_1):
        # input_0: predictions (logits), input_1: targets.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
PARMAGroup/UNet-Instance-Cell-Segmentation
|
DiceLoss
| false
| 8,624
|
[
"MIT"
] | 30
|
79655a2c5781d2e20c7d5760f631fbb0be392292
|
https://github.com/PARMAGroup/UNet-Instance-Cell-Segmentation/tree/79655a2c5781d2e20c7d5760f631fbb0be392292
|
RMSELoss
|
import torch
import torch.nn as nn
class RMSELoss(nn.Module):
    """Root-mean-square error: sqrt(MSE(yhat, y))."""

    def __init__(self):
        super(RMSELoss, self).__init__()
        self.mse = nn.MSELoss()

    def forward(self, yhat, y):
        mean_squared = self.mse(yhat, y)
        return mean_squared.sqrt()
def get_inputs():
    """Sample forward inputs: two random tensors of shape (4, 4, 4, 4)."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape), torch.rand(*shape)]
def get_init_inputs():
    """Constructor arguments: no positionals and no keyword arguments."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    # Single-program reduction over all 256 elements: mean of the squared
    # elementwise difference, then sqrt -- the fused sqrt(MSE).  Scalar
    # result is written to in_out_ptr0[0].
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
    """Launch the fused RMSE kernel on two (4, 4, 4, 4) CUDA tensors.

    args is cleared in place; returns a 1-tuple with a scalar float32
    tensor holding sqrt(mean((a - b) ** 2)).
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Scalar output; buf1 aliases buf0 as the in-place reduction target.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mse_loss_sqrt_0[grid(1)](buf1, arg1_1, arg0_1, 1,
            256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class RMSELossNew(nn.Module):
    """RMSE loss backed by the fused Triton kernel (see ``call``)."""

    def __init__(self):
        super(RMSELossNew, self).__init__()
        # Kept for interface parity with RMSELoss; the fused path does not
        # invoke it.
        self.mse = nn.MSELoss()

    def forward(self, input_0, input_1):
        return call([input_0, input_1])[0]
|
PARMAGroup/UNet-Instance-Cell-Segmentation
|
RMSELoss
| false
| 8,626
|
[
"MIT"
] | 30
|
79655a2c5781d2e20c7d5760f631fbb0be392292
|
https://github.com/PARMAGroup/UNet-Instance-Cell-Segmentation/tree/79655a2c5781d2e20c7d5760f631fbb0be392292
|
IoULoss
|
import torch
import torch.nn as nn
class IoULoss(nn.Module):
    """Intersection-over-Union loss adapted to (soft) heatmaps.

    IoU = overlap / union, computed per (batch, channel) plane over the
    two trailing spatial dimensions; the loss is 1 - mean(IoU).
    """

    def __init__(self):
        super(IoULoss, self).__init__()
        # Keeps the ratio finite when a plane is entirely zero.
        self.EPSILON = 1e-06

    def _op_sum(self, x):
        # Reduce over the two trailing (spatial) dimensions.
        return x.sum(-1).sum(-1)

    def forward(self, y_pred, y_true):
        overlap = self._op_sum(y_true * y_pred)
        union = (self._op_sum(y_true ** 2) + self._op_sum(y_pred ** 2)
                 - overlap)
        iou = (overlap + self.EPSILON) / (union + self.EPSILON)
        return 1 - torch.mean(iou)
def get_inputs():
    """Sample forward() inputs used by the benchmark harness."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused IoU loss for two (4, 4, 4, 4) inputs. Each of the 16 reduction
    # lanes (r0) handles one (batch, channel) plane of 16 spatial elements,
    # fully unrolled: it accumulates sum(a^2), sum(a*b) and sum(b^2) for
    # that plane, forms the per-plane IoU, then the kernel averages the 16
    # IoU values and stores 1 - mean(IoU) into the scalar output.
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    # Unrolled loads: the 16 spatial values of plane r0 from in_ptr0.
    tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 16 * r0), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 16 * r0), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + 16 * r0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 + 16 * r0), None, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (6 + 16 * r0), None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (7 + 16 * r0), None, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (8 + 16 * r0), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (9 + 16 * r0), None, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (11 + 16 * r0), None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr0 + (12 + 16 * r0), None, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr0 + (13 + 16 * r0), None, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr0 + (14 + 16 * r0), None, eviction_policy='evict_last')
    tmp43 = tl.load(in_ptr0 + (15 + 16 * r0), None, eviction_policy='evict_last')
    # Matching 16 values of the same plane from in_ptr1.
    tmp47 = tl.load(in_ptr1 + 16 * r0, None, eviction_policy='evict_last')
    tmp49 = tl.load(in_ptr1 + (1 + 16 * r0), None, eviction_policy='evict_last')
    tmp52 = tl.load(in_ptr1 + (2 + 16 * r0), None, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr1 + (3 + 16 * r0), None, eviction_policy='evict_last')
    tmp58 = tl.load(in_ptr1 + (4 + 16 * r0), None, eviction_policy='evict_last')
    tmp60 = tl.load(in_ptr1 + (5 + 16 * r0), None, eviction_policy='evict_last')
    tmp63 = tl.load(in_ptr1 + (6 + 16 * r0), None, eviction_policy='evict_last')
    tmp66 = tl.load(in_ptr1 + (7 + 16 * r0), None, eviction_policy='evict_last')
    tmp70 = tl.load(in_ptr1 + (8 + 16 * r0), None, eviction_policy='evict_last')
    tmp72 = tl.load(in_ptr1 + (9 + 16 * r0), None, eviction_policy='evict_last')
    tmp75 = tl.load(in_ptr1 + (10 + 16 * r0), None, eviction_policy='evict_last')
    tmp78 = tl.load(in_ptr1 + (11 + 16 * r0), None, eviction_policy='evict_last')
    tmp82 = tl.load(in_ptr1 + (12 + 16 * r0), None, eviction_policy='evict_last')
    tmp84 = tl.load(in_ptr1 + (13 + 16 * r0), None, eviction_policy='evict_last')
    tmp87 = tl.load(in_ptr1 + (14 + 16 * r0), None, eviction_policy='evict_last')
    tmp90 = tl.load(in_ptr1 + (15 + 16 * r0), None, eviction_policy='evict_last')
    # tmp46 = sum of squares of in_ptr0 over the plane.
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp12 = tmp11 * tmp11
    tmp14 = tmp13 * tmp13
    tmp15 = tmp12 + tmp14
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp10 + tmp21
    tmp24 = tmp23 * tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp29 = tmp28 * tmp28
    tmp30 = tmp27 + tmp29
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp22 + tmp33
    tmp36 = tmp35 * tmp35
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp41 = tmp40 * tmp40
    tmp42 = tmp39 + tmp41
    tmp44 = tmp43 * tmp43
    tmp45 = tmp42 + tmp44
    tmp46 = tmp34 + tmp45
    # tmp93 = sum of products (the overlap term) over the plane.
    tmp48 = tmp47 * tmp0
    tmp50 = tmp49 * tmp2
    tmp51 = tmp48 + tmp50
    tmp53 = tmp52 * tmp5
    tmp54 = tmp51 + tmp53
    tmp56 = tmp55 * tmp8
    tmp57 = tmp54 + tmp56
    tmp59 = tmp58 * tmp11
    tmp61 = tmp60 * tmp13
    tmp62 = tmp59 + tmp61
    tmp64 = tmp63 * tmp16
    tmp65 = tmp62 + tmp64
    tmp67 = tmp66 * tmp19
    tmp68 = tmp65 + tmp67
    tmp69 = tmp57 + tmp68
    tmp71 = tmp70 * tmp23
    tmp73 = tmp72 * tmp25
    tmp74 = tmp71 + tmp73
    tmp76 = tmp75 * tmp28
    tmp77 = tmp74 + tmp76
    tmp79 = tmp78 * tmp31
    tmp80 = tmp77 + tmp79
    tmp81 = tmp69 + tmp80
    tmp83 = tmp82 * tmp35
    tmp85 = tmp84 * tmp37
    tmp86 = tmp83 + tmp85
    tmp88 = tmp87 * tmp40
    tmp89 = tmp86 + tmp88
    tmp91 = tmp90 * tmp43
    tmp92 = tmp89 + tmp91
    tmp93 = tmp81 + tmp92
    # tmp124 = sum of squares of in_ptr1 over the plane.
    tmp94 = tmp47 * tmp47
    tmp95 = tmp49 * tmp49
    tmp96 = tmp94 + tmp95
    tmp97 = tmp52 * tmp52
    tmp98 = tmp96 + tmp97
    tmp99 = tmp55 * tmp55
    tmp100 = tmp98 + tmp99
    tmp101 = tmp58 * tmp58
    tmp102 = tmp60 * tmp60
    tmp103 = tmp101 + tmp102
    tmp104 = tmp63 * tmp63
    tmp105 = tmp103 + tmp104
    tmp106 = tmp66 * tmp66
    tmp107 = tmp105 + tmp106
    tmp108 = tmp100 + tmp107
    tmp109 = tmp70 * tmp70
    tmp110 = tmp72 * tmp72
    tmp111 = tmp109 + tmp110
    tmp112 = tmp75 * tmp75
    tmp113 = tmp111 + tmp112
    tmp114 = tmp78 * tmp78
    tmp115 = tmp113 + tmp114
    tmp116 = tmp108 + tmp115
    tmp117 = tmp82 * tmp82
    tmp118 = tmp84 * tmp84
    tmp119 = tmp117 + tmp118
    tmp120 = tmp87 * tmp87
    tmp121 = tmp119 + tmp120
    tmp122 = tmp90 * tmp90
    tmp123 = tmp121 + tmp122
    tmp124 = tmp116 + tmp123
    # Per-plane IoU = (overlap + eps) / (sumsq_a + sumsq_b - overlap + eps).
    tmp125 = 1e-06
    tmp126 = tmp93 + tmp125
    tmp127 = tmp124 + tmp46
    tmp128 = tmp127 - tmp93
    tmp129 = tmp128 + tmp125
    tmp130 = tmp126 / tmp129
    # Mean over the 16 planes, then loss = 1 - mean(IoU).
    tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
    tmp133 = tl.sum(tmp131, 1)[:, None]
    tmp134 = 16.0
    tmp135 = tmp133 / tmp134
    tmp136 = 1.0
    tmp137 = tmp136 - tmp135
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp137, None)
def call(args):
    """Launch the fused IoU-loss kernel on two (4, 4, 4, 4) CUDA tensors.

    args is cleared in place; returns a 1-tuple with a scalar float32
    tensor holding 1 - mean(IoU).
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Scalar output; buf5 aliases buf4 as the in-place reduction target.
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        get_raw_stream(0)
        triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_0[grid(1)](buf5,
            arg1_1, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf5,
class IoULossNew(nn.Module):
    """IoU heatmap loss whose forward pass dispatches to the fused Triton
    kernel via the module-level ``call`` function."""

    def __init__(self):
        super(IoULossNew, self).__init__()
        # Kept for interface parity; the fused kernel hard-codes 1e-06.
        self.EPSILON = 1e-06

    def _op_sum(self, x):
        # Reduce over the two trailing (spatial) dimensions.
        return x.sum(-1).sum(-1)

    def forward(self, input_0, input_1):
        return call([input_0, input_1])[0]
|
OlgaChernytska/2D-Hand-Pose-Estimation-RGB
|
IoULoss
| false
| 8,627
|
[
"MIT"
] | 24
|
31096d628ca11ec4a9b6fa8b2509a2b3e5272125
|
https://github.com/OlgaChernytska/2D-Hand-Pose-Estimation-RGB/tree/31096d628ca11ec4a9b6fa8b2509a2b3e5272125
|
SpatialGate
|
import torch
import torch.nn as nn
class SpatialGate(nn.Module):
    """Single-channel spatial gate: a 3x3 transposed convolution (stride 1,
    padding 1, so spatial size is preserved) followed by a sigmoid."""

    def __init__(self, out_channels):
        super(SpatialGate, self).__init__()
        self.conv = nn.ConvTranspose2d(out_channels, 1, kernel_size=3,
            stride=1, padding=1)

    def forward(self, x):
        return torch.sigmoid(self.conv(x))
def get_inputs():
    """Sample forward() input used by the benchmark harness."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmark harness."""
    return [[], {'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Epilogue of the transposed convolution: add the single output-channel
    # bias (in_ptr0, one scalar) to each of the 64 conv outputs in place,
    # then apply sigmoid.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)  # the lone bias value
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
    """Run the SpatialGate forward pass on CUDA.

    args: [weight (4,1,3,3), bias (1,), input (4,4,4,4)]; cleared in place.
    The transposed convolution goes through the extern (cuDNN) kernel and
    the bias-add + sigmoid epilogue is fused in Triton. Returns the gated
    output plus tensors saved for backward.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # bias=None here: the bias is applied in the fused Triton epilogue.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_sigmoid_0[grid(64)](buf1, primals_2,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3, buf1
class SpatialGateNew(nn.Module):
    """SpatialGate whose forward pass runs the compiled conv + sigmoid
    pipeline via the module-level ``call`` function."""

    def __init__(self, out_channels):
        super(SpatialGateNew, self).__init__()
        self.conv = nn.ConvTranspose2d(out_channels, 1, kernel_size=3,
            stride=1, padding=1)

    def forward(self, input_0):
        weight = self.conv.weight
        bias = self.conv.bias
        return call([weight, bias, input_0])[0]
|
PRIS-CV/AP-CNN_Pytorch-master
|
SpatialGate
| false
| 8,630
|
[
"MIT"
] | 26
|
00ddefee69ab35b8435b732bdf3bd7514a3e4545
|
https://github.com/PRIS-CV/AP-CNN_Pytorch-master/tree/00ddefee69ab35b8435b732bdf3bd7514a3e4545
|
WCELoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class WCELoss(nn.Module):
    """Weighted cross-entropy over heatmap targets.

    Targets are normalized per (batch, channel) plane, summed over
    channels, weighted, rescaled into [1, 100], and used as soft weights
    on log-softmax predictions.
    """

    def __init__(self):
        super(WCELoss, self).__init__()

    def forward(self, y_pred, y_true, weights):
        # Normalize each (batch, channel) plane to sum to 1.
        plane_sums = y_true.sum(2).sum(2, dtype=torch.float)
        y_true = y_true / plane_sums.unsqueeze(-1).unsqueeze(-1)
        # All-zero planes produce 0/0 -> NaN; zero those entries out.
        y_true[y_true != y_true] = 0.0
        y_true = torch.sum(y_true, dim=1, dtype=torch.float).unsqueeze(1)
        y_true = y_true * weights
        # Linearly rescale the weighted target into [1, 100].
        lo = torch.min(y_true)
        span = torch.max(y_true) - lo
        y_true = (y_true - lo) * (100 - 1) / span + 1
        log_probs = torch.log(F.softmax(y_pred, dim=1))
        return -torch.mean(torch.sum(y_true * log_probs, dim=1))
def get_inputs():
    """Sample forward() inputs (pred, target, weights) for the harness."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(3)]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Per-plane spatial sum: for each of the 16 (batch, channel) planes
    # (x0), load its 16 elements (offsets 0..15 within stride-16 plane,
    # fully unrolled) and write their total to out_ptr0[x0]. Implements
    # y_true.sum(2).sum(2).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    # Column-wise partial sums, then the grand total of the plane.
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp9 = tmp7 + tmp8
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tmp6 + tmp13
    tmp17 = tmp15 + tmp16
    tmp19 = tmp17 + tmp18
    tmp21 = tmp19 + tmp20
    tmp22 = tmp14 + tmp21
    tmp25 = tmp23 + tmp24
    tmp27 = tmp25 + tmp26
    tmp29 = tmp27 + tmp28
    tmp30 = tmp22 + tmp29
    tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_div_index_put_lift_fresh_1(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Divide each element by its plane sum (in_ptr1, one value per 16
    # elements) and replace NaN results (from 0/0 planes) with 0.0 --
    # fuses the normalization and `y_true[y_true != y_true] = 0.0` steps.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16  # plane index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 / tmp1
    tmp3 = tmp2 != tmp2  # NaN test: NaN != NaN
    tmp4 = 0.0
    tmp5 = tl.where(tmp3, tmp4, tmp2)
    tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Numerically-stable softmax numerator over the channel dimension
    # (4 channels, stride 16): exp(x - max over channels). Normalization
    # by the channel sum happens in the downstream kernel.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16   # spatial position within a plane
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused__softmax_add_div_log_max_min_mul_sub_3(in_ptr0,
    in_ptr1, in_ptr2, out_ptr4, xnumel, rnumel):
    # Fuses the remainder of the WCE target/prediction pipeline in one
    # 256-lane reduction program:
    #   target = (channel-sum of normalized y_true) * weights  (tmp8)
    #   rescale target into [1, 100] using its global min/max    (tmp20)
    #   log-softmax of y_pred from the exp values in in_ptr2     (tmp30)
    #   elementwise product target * log_prob                    (tmp31)
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex % 16   # spatial position
    r2 = rindex // 64  # batch index
    r3 = rindex
    # Four channel slices of the normalized target (stride 16).
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + r3, None)   # weights
    tmp21 = tl.load(in_ptr2 + r3, None)  # exp(x - max) for this element
    # Channel slices of exp(x - max) for the softmax denominator.
    tmp22 = tl.load(in_ptr2 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr2 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr2 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 * tmp7  # weighted, channel-summed target
    tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
    tmp11 = triton_helpers.promote_to_tensor(triton_helpers.min2(tmp9, 0))
    tmp13 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp9, 0))
    tmp14 = tmp8 - tmp11
    tmp15 = 99.0  # new_range = 100 - 1
    tmp16 = tmp14 * tmp15
    tmp17 = tmp13 - tmp11  # old_range = max - min
    tmp18 = tmp16 / tmp17
    tmp19 = 1.0
    tmp20 = tmp18 + tmp19
    tmp24 = tmp22 + tmp23
    tmp26 = tmp24 + tmp25
    tmp28 = tmp26 + tmp27
    tmp29 = tmp21 / tmp28  # softmax probability
    tmp30 = tl_math.log(tmp29)
    tmp31 = tmp20 * tmp30
    tl.store(out_ptr4 + tl.broadcast_to(r3, [RBLOCK]), tmp31, None)
@triton.jit
def triton_per_fused_mean_neg_sum_4(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # Final reduction: sum the 4 channel values per position (stride 16),
    # average over the 64 (batch, spatial) positions, and negate -- i.e.
    # -mean(sum(target * log_prob, dim=1)).
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16   # spatial position
    r1 = rindex // 16  # batch index
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp10 = 64.0  # number of (batch, spatial) positions
    tmp11 = tmp9 / tmp10
    tmp12 = -tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None)
def call(args):
    """Run the compiled WCELoss pipeline.

    args: [y_true, weights, y_pred], each a (4, 4, 4, 4) float32 CUDA
    tensor; the list is cleared in place. Returns a 1-tuple with the
    scalar loss. Kernel order: plane sums -> normalize + NaN scrub ->
    softmax numerator -> fused target/log-prob product -> final reduction.
    """
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Per-(batch, channel) plane sums of y_true.
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sum_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # Normalized target with NaNs replaced by zero.
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_div_index_put_lift_fresh_1[grid(256)](arg0_1, buf0,
            buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del buf0
        # exp(y_pred - channel max) for the softmax.
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](arg2_1, buf6, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del arg2_1
        # Elementwise target * log(softmax) terms.
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused__softmax_add_div_log_max_min_mul_sub_3[grid(1)](buf1,
            arg1_1, buf6, buf7, 1, 256, num_warps=2, num_stages=1)
        del arg1_1
        del buf1
        del buf6
        # Scalar result; buf9 aliases buf8 as the in-place reduction target.
        buf8 = empty_strided_cuda((), (), torch.float32)
        buf9 = buf8
        del buf8
        triton_per_fused_mean_neg_sum_4[grid(1)](buf9, buf7, 1, 64, XBLOCK=
            1, num_warps=2, num_stages=1)
        del buf7
    return buf9,
class WCELossNew(nn.Module):
    """Weighted cross-entropy loss front-end dispatching to the fused
    Triton kernels via the module-level ``call`` function."""

    def __init__(self):
        super(WCELossNew, self).__init__()

    def forward(self, input_0, input_1, input_2):
        return call([input_0, input_1, input_2])[0]
|
PARMAGroup/UNet-Instance-Cell-Segmentation
|
WCELoss
| false
| 8,631
|
[
"MIT"
] | 30
|
79655a2c5781d2e20c7d5760f631fbb0be392292
|
https://github.com/PARMAGroup/UNet-Instance-Cell-Segmentation/tree/79655a2c5781d2e20c7d5760f631fbb0be392292
|
Quantizer
|
import torch
import torch.quantization
import torch.nn as nn
import torch.utils.data
class Quantizer(nn.Module):
    """Quantization layer: at eval time, round to the nearest integer;
    during training (or when fine_tune=True) add uniform noise in
    [-0.5, 0.5) as a differentiable surrogate."""

    def __init__(self):
        super(Quantizer, self).__init__()

    def forward(self, x, fine_tune=False):
        if self.training or fine_tune:
            noise = torch.rand(x.size(), device=x.device) - 0.5
            return x + noise
        return torch.round(x)
def get_inputs():
    """Sample forward() input used by the benchmark harness."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.quantization
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise round-to-nearest (nearbyint: half-to-even, matching
    # torch.round) over the 256 input elements.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.nearbyint(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
    """Launch the elementwise rounding kernel on one (4, 4, 4, 4) CUDA
    tensor; args is cleared in place. Returns a 1-tuple with the rounded
    tensor. Note: only the eval-time (round) branch of Quantizer is
    compiled here -- no training noise path.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_round_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class QuantizerNew(nn.Module):
    """Quantizer front-end for the compiled kernel.

    NOTE(review): ``call`` only launches the rounding kernel, so this
    wrapper always rounds -- the original training-noise branch is not
    reachable here.
    """

    def __init__(self):
        super(QuantizerNew, self).__init__()

    def forward(self, input_0):
        return call([input_0])[0]
|
Orange-OpenSource/AIVC
|
Quantizer
| false
| 8,632
|
[
"BSD-3-Clause"
] | 18
|
8534111d1e08cdbf7efa92ebbb105af3c9044521
|
https://github.com/Orange-OpenSource/AIVC/tree/8534111d1e08cdbf7efa92ebbb105af3c9044521
|
_Sum
|
import torch
import torch.nn as nn
import torch.jit
class _Sum(nn.Module):
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return input.sum()
def get_inputs():
    """Sample forward() input used by the benchmark harness."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
    # Full-tensor sum: one program reduces all 256 elements to a scalar.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None)
def call(args):
    """Launch the full-tensor sum kernel on one (4, 4, 4, 4) CUDA tensor;
    args is cleared in place. Returns a 1-tuple with a scalar tensor.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 256, num_warps=2,
            num_stages=1)
        del arg0_1
    return buf0,
class _SumNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
One-sixth/ms_ssim_pytorch
|
_Sum
| false
| 8,634
|
[
"MIT"
] | 42
|
6269c62e0dd29c91fa38e4ba73d906d0c84ca966
|
https://github.com/One-sixth/ms_ssim_pytorch/tree/6269c62e0dd29c91fa38e4ba73d906d0c84ca966
|
Temperature
|
import torch
import torch.nn as nn
class Temperature(nn.Module):
    """Divide activations by a fixed temperature (for use in
    ``nn.Sequential``)."""

    def __init__(self, temperature):
        super(Temperature, self).__init__()
        self.temperature = temperature

    def forward(self, data):
        return data.div(self.temperature)
def get_inputs():
    """Sample forward() input used by the benchmark harness."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmark harness."""
    return [[], {'temperature': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise division by temperature, strength-reduced to a multiply:
    # 0.25 == 1 / temperature for the compiled temperature of 4.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
    """Launch the temperature-division kernel on one (4, 4, 4, 4) CUDA
    tensor; args is cleared in place. Returns a 1-tuple with the scaled
    tensor. The scale factor (0.25) is baked into the kernel.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class TemperatureNew(nn.Module):
    """Temperature scaling backed by the fused division kernel.

    NOTE(review): the compiled kernel hard-codes the multiplier 0.25
    (i.e. temperature 4); ``self.temperature`` is stored but not read by
    ``forward``.
    """

    def __init__(self, temperature):
        super(TemperatureNew, self).__init__()
        self.temperature = temperature

    def forward(self, input_0):
        return call([input_0])[0]
|
PaccMann/paccmann_predictor
|
Temperature
| false
| 8,636
|
[
"MIT"
] | 19
|
58071311310c45c1efabb34a4003b96a1c58901a
|
https://github.com/PaccMann/paccmann_predictor/tree/58071311310c45c1efabb34a4003b96a1c58901a
|
DeConvNet2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def spectral_norm(module, init=True, std=1, bound=False):
    """Attach spectral normalization to ``module``'s ``weight``.

    When ``init`` is true, re-initializes the weight from N(0, std) and
    zeroes the bias (if any). ``bound`` is forwarded to SpectralNorm.
    Returns the same module for chaining.
    """
    if init:
        nn.init.normal_(module.weight, 0, std)
    bias = getattr(module, 'bias', None)
    if bias is not None:
        bias.data.zero_()
    SpectralNorm.apply(module, 'weight', bound=bound)
    return module
def get_activation(s_act):
    """Map an activation name to a freshly constructed module.

    'linear' maps to None (no activation). Raises ValueError for any
    unrecognized name.
    """
    builders = {
        'relu': lambda: nn.ReLU(inplace=True),
        'sigmoid': lambda: nn.Sigmoid(),
        'softplus': lambda: nn.Softplus(),
        'linear': lambda: None,
        'tanh': lambda: nn.Tanh(),
        'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
        'softmax': lambda: nn.Softmax(dim=1),
        'spherical': lambda: SphericalActivation(),
    }
    if s_act not in builders:
        raise ValueError(f'Unexpected activation: {s_act}')
    return builders[s_act]()
class SpectralNorm:
    """Forward-pre-hook that spectrally normalizes a module parameter.

    Keeps the raw weight as ``<name>_orig`` plus a persistent left
    singular-vector estimate ``<name>_u``; on every forward it performs
    one power-iteration step and rebinds ``<name>`` to weight / sigma.
    """

    def __init__(self, name, bound=False):
        self.name = name
        # When True, only shrink weights whose sigma exceeds 1.
        self.bound = bound

    def compute_weight(self, module):
        """One power-iteration step; returns (normalized weight, new u)."""
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        size = weight.size()
        # Flatten to a 2-D matrix: (out_features, everything else).
        weight_mat = weight.contiguous().view(size[0], -1)
        with torch.no_grad():
            v = weight_mat.t() @ u
            v = v / v.norm()
            u = weight_mat @ v
            u = u / u.norm()
        # Rayleigh-quotient estimate of the largest singular value.
        sigma = u @ weight_mat @ v
        if self.bound:
            # Divide out sigma only when sigma > 1 (clamped product).
            weight_sn = weight / (sigma + 1e-06) * torch.clamp(sigma, max=1)
        else:
            weight_sn = weight / sigma
        return weight_sn, u

    @staticmethod
    def apply(module, name, bound):
        """Install the hook: move the parameter to ``<name>_orig`` and
        register ``<name>`` / ``<name>_u`` as buffers."""
        fn = SpectralNorm(name, bound)
        weight = getattr(module, name)
        del module._parameters[name]
        module.register_parameter(name + '_orig', weight)
        input_size = weight.size(0)
        u = weight.new_empty(input_size).normal_()
        module.register_buffer(name, weight)
        module.register_buffer(name + '_u', u)
        module.register_forward_pre_hook(fn)
        return fn

    def __call__(self, module, input):
        # Runs before every forward: refresh the normalized weight and u.
        weight_sn, u = self.compute_weight(module)
        setattr(module, self.name, weight_sn)
        setattr(module, self.name + '_u', u)
class SphericalActivation(nn.Module):
    """Project activations onto the unit sphere along dim 1 (L2-normalize
    each channel vector)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        norms = x.norm(p=2, dim=1, keepdim=True)
        return x / norms
class DeConvNet2(nn.Module):
    """Transposed-convolution decoder with two 2x bilinear upsampling
    stages.

    Five ConvTranspose2d layers (no-padding kernels, so each layer grows
    the spatial size) with ReLU between them; the final activation is
    selected by name via get_activation ('linear' means none). When
    use_spectral_norm is set, every conv layer is wrapped with
    spectral_norm.
    """

    def __init__(self, in_chan=1, out_chan=1, nh=8, out_activation='linear',
        use_spectral_norm=False):
        """nh: determines the numbers of conv filters"""
        super(DeConvNet2, self).__init__()
        # Channel widths scale with nh: in -> 16nh -> 8nh -> 8nh -> 4nh -> out.
        self.conv1 = nn.ConvTranspose2d(in_chan, nh * 16, kernel_size=4,
            bias=True)
        self.conv2 = nn.ConvTranspose2d(nh * 16, nh * 8, kernel_size=3,
            bias=True)
        self.conv3 = nn.ConvTranspose2d(nh * 8, nh * 8, kernel_size=3, bias
            =True)
        self.conv4 = nn.ConvTranspose2d(nh * 8, nh * 4, kernel_size=3, bias
            =True)
        self.conv5 = nn.ConvTranspose2d(nh * 4, out_chan, kernel_size=3,
            bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        # None when out_activation == 'linear'.
        self.out_activation = get_activation(out_activation)
        if use_spectral_norm:
            self.conv1 = spectral_norm(self.conv1)
            self.conv2 = spectral_norm(self.conv2)
            self.conv3 = spectral_norm(self.conv3)
            self.conv4 = spectral_norm(self.conv4)
            self.conv5 = spectral_norm(self.conv5)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        # 2x bilinear upsample (align_corners) between conv stages.
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners
            =True)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.conv3(x)
        x = F.relu(x)
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners
            =True)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.conv5(x)
        if self.out_activation is not None:
            x = self.out_activation(x)
        return x
def get_inputs():
    """Sample forward() input used by the benchmark harness."""
    return [torch.rand([4, 1, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmark harness."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Bilinear-upsample index table: lower source index for each of the 14
    # output positions. 0.4615... == 6/13, the align_corners scale factor
    # for resizing 7 -> 14.
    xnumel = 14
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.46153846153846156
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)  # floor of the source coordinate
    tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Bilinear-upsample index table: upper source index (lower + 1,
    # clamped to 6, the last valid index of the 7-wide source).
    xnumel = 14
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.46153846153846156
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 6, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_2(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Bilinear-upsample weight table: fractional part of each output
    # position's source coordinate, clamped to [0, 1] -- the lerp factor
    # between the lower and upper source indices.
    xnumel = 14
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.46153846153846156
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 - tmp7  # fractional offset from the lower index
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
    xnumel, XBLOCK: tl.constexpr):
    """Fused bias-add + ReLU + 2-D bilinear upsample, 7x7 -> 14x14.

    in_ptr2 is the 7x7 transposed-conv output, in_ptr3 its per-channel bias.
    in_ptr0/in_ptr6 are the low/high row indices, in_ptr1/in_ptr4 the
    low/high column indices, and in_ptr5/in_ptr7 the column/row lerp
    weights precomputed by the small index kernels.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 14 % 14
    x0 = xindex % 14
    x6 = xindex // 196
    x2 = xindex // 196 % 128
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
    # Wrap negative indices by adding the source extent (7).
    tmp1 = tl.full([XBLOCK], 7, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    # Sample (low row, low col): relu(conv + bias).
    tmp9 = tl.load(in_ptr2 + (tmp8 + 7 * tmp4 + 49 * x6), None,
        eviction_policy='evict_last')
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 0, tl.int32)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tmp15 = tmp14 + tmp1
    tmp16 = tmp14 < 0
    tmp17 = tl.where(tmp16, tmp15, tmp14)
    # Sample (low row, high col) and lerp horizontally along the low row.
    tmp18 = tl.load(in_ptr2 + (tmp17 + 7 * tmp4 + 49 * x6), None,
        eviction_policy='evict_last')
    tmp19 = tmp18 + tmp10
    tmp20 = triton_helpers.maximum(tmp12, tmp19)
    tmp21 = tmp20 - tmp13
    tmp23 = tmp21 * tmp22
    tmp24 = tmp13 + tmp23
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    # Samples on the high row, lerped horizontally the same way.
    tmp29 = tl.load(in_ptr2 + (tmp8 + 7 * tmp28 + 49 * x6), None,
        eviction_policy='evict_last')
    tmp30 = tmp29 + tmp10
    tmp31 = triton_helpers.maximum(tmp12, tmp30)
    tmp32 = tl.load(in_ptr2 + (tmp17 + 7 * tmp28 + 49 * x6), None,
        eviction_policy='evict_last')
    tmp33 = tmp32 + tmp10
    tmp34 = triton_helpers.maximum(tmp12, tmp33)
    tmp35 = tmp34 - tmp31
    tmp36 = tmp35 * tmp22
    tmp37 = tmp31 + tmp36
    # Final vertical lerp between the two row results.
    tmp38 = tmp37 - tmp24
    tmp40 = tmp38 * tmp39
    tmp41 = tmp24 + tmp40
    tl.store(in_out_ptr0 + x4, tmp41, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place per-channel bias add + ReLU over a conv output.

    x1 = (xindex // 256) % 64 selects the bias channel, matching the
    (4, 64, 16, 16) buffer produced at the call site.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """out[x] = int(max(x * 17/35, 0)) for x in [0, 36).

    Low source index of an 18 -> 36 linear resample
    (0.4857... = (18-1)/(36-1)); companion to triton_poi_fused__to_copy_0.
    """
    xnumel = 36
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4857142857142857
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """out[x] = min(int(max(x * 17/35, 0)) + 1, 17) for x in [0, 36).

    High source index (low + 1, clamped to the last valid position 17) of
    the 18 -> 36 resample.
    """
    xnumel = 36
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4857142857142857
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 17, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_7(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """out[x] = clamp(frac(x * 17/35), 0, 1) for x in [0, 36).

    Fractional interpolation weight for the 18 -> 36 resample.
    """
    xnumel = 36
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4857142857142857
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    # tmp5 - int(tmp5): the fractional part of the scaled coordinate.
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_8(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    """Fused bias-add + ReLU + 2-D bilinear upsample, 18x18 -> 36x36.

    Same structure as the _3 kernel, but out-of-place (out_ptr2) over
    64 channels: in_ptr2 is the conv output, in_ptr3 the bias,
    in_ptr0/in_ptr6 row indices, in_ptr1/in_ptr4 column indices,
    in_ptr5/in_ptr7 the column/row lerp weights.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 36 % 36
    x0 = xindex % 36
    x5 = xindex // 1296
    x2 = xindex // 1296 % 64
    # Dead expression preserved from the code generator's output.
    xindex % 1296
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
    # Wrap negative indices by adding the source extent (18).
    tmp1 = tl.full([XBLOCK], 18, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    # Sample (low row, low col): relu(conv + bias).
    tmp9 = tl.load(in_ptr2 + (tmp8 + 18 * tmp4 + 324 * x5), None,
        eviction_policy='evict_last')
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 0, tl.int32)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tmp15 = tmp14 + tmp1
    tmp16 = tmp14 < 0
    tmp17 = tl.where(tmp16, tmp15, tmp14)
    # Sample (low row, high col) and lerp horizontally along the low row.
    tmp18 = tl.load(in_ptr2 + (tmp17 + 18 * tmp4 + 324 * x5), None,
        eviction_policy='evict_last')
    tmp19 = tmp18 + tmp10
    tmp20 = triton_helpers.maximum(tmp12, tmp19)
    tmp21 = tmp20 - tmp13
    tmp23 = tmp21 * tmp22
    tmp24 = tmp13 + tmp23
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    # Samples on the high row, lerped horizontally the same way.
    tmp29 = tl.load(in_ptr2 + (tmp8 + 18 * tmp28 + 324 * x5), None,
        eviction_policy='evict_last')
    tmp30 = tmp29 + tmp10
    tmp31 = triton_helpers.maximum(tmp12, tmp30)
    tmp32 = tl.load(in_ptr2 + (tmp17 + 18 * tmp28 + 324 * x5), None,
        eviction_policy='evict_last')
    tmp33 = tmp32 + tmp10
    tmp34 = triton_helpers.maximum(tmp12, tmp33)
    tmp35 = tmp34 - tmp31
    tmp36 = tmp35 * tmp22
    tmp37 = tmp31 + tmp36
    # Final vertical lerp between the two row results.
    tmp38 = tmp37 - tmp24
    tmp40 = tmp38 * tmp39
    tmp41 = tmp24 + tmp40
    tl.store(out_ptr2 + x6, tmp41, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place per-channel bias add + ReLU.

    xnumel = 184832 = 4 * 32 * 38 * 38 (the conv4 output at the call
    site); x1 = (xindex // 1444) % 32 selects the bias channel.
    """
    xnumel = 184832
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 1444 % 32
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    """Broadcast-add a single-element bias over the whole buffer, in place.

    xnumel = 6400 = 4 * 1 * 40 * 40: the one-channel final conv output.
    No ReLU -- this is the network's linear output layer.
    """
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Store the boolean mask relu(conv + bias) <= 0 for ReLU's backward.

    xnumel = 82944 = 4 * 64 * 18 * 18; x1 selects the bias channel. The
    mask marks positions where the gradient will be zeroed.
    """
    xnumel = 82944
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 324 % 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Store the boolean mask relu(conv + bias) <= 0 for ReLU's backward.

    xnumel = 25088 = 4 * 128 * 7 * 7; x1 selects the bias channel.
    """
    xnumel = 25088
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 49 % 128
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
    """Inductor-compiled forward pass for DeConvNet2New (CUDA only).

    args holds, in order: conv1.weight, conv1.bias, the input tensor, then
    weight/bias pairs for conv2..conv5.  Returns the network output first,
    followed by every tensor the autograd backward needs (weights, inputs,
    interpolation indices/weights, intermediate activations, ReLU masks).
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (1, 128, 4, 4), (2048, 16, 4, 1))
    assert_size_stride(primals_2, (128,), (1,))
    assert_size_stride(primals_3, (4, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_9, (32,), (1,))
    assert_size_stride(primals_10, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_11, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1: transposed 4x4 conv -> (4, 128, 7, 7); bias/ReLU fused later.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 128, 7, 7), (6272, 49, 7, 1))
        # Precompute index tables and lerp weights for the 7 -> 14 upsample.
        buf1 = empty_strided_cuda((14, 1), (1, 1), torch.int64)
        get_raw_stream(0)
        triton_poi_fused__to_copy_0[grid(14)](buf1, 14, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((14, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_1[grid(14)](buf2, 14, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((14,), (1,), torch.int64)
        triton_poi_fused__to_copy_0[grid(14)](buf3, 14, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((14,), (1,), torch.int64)
        triton_poi_fused_add_clamp_1[grid(14)](buf4, 14, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((14,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_2[grid(14)](buf5, 14,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf7 = empty_strided_cuda((14, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_2[grid(14)](buf7, 14,
            XBLOCK=16, num_warps=1, num_stages=1)
        # Fused bias + ReLU + bilinear upsample to (4, 128, 14, 14).
        buf8 = empty_strided_cuda((4, 128, 14, 14), (25088, 196, 14, 1),
            torch.float32)
        buf9 = buf8
        del buf8
        triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3[grid(
            100352)](buf9, buf1, buf3, buf0, primals_2, buf4, buf5, buf2,
            buf7, 100352, XBLOCK=512, num_warps=8, num_stages=1)
        # conv2 + fused bias/ReLU.
        buf10 = extern_kernels.convolution(buf9, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 64, 16, 16), (16384, 256, 16, 1))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_relu_4[grid(65536)](buf11, primals_5,
            65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_5
        # conv3; its bias/ReLU is folded into the 18 -> 36 upsample kernel.
        buf12 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 64, 18, 18), (20736, 324, 18, 1))
        # Index tables and lerp weights for the 18 -> 36 upsample.
        buf13 = empty_strided_cuda((36, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_5[grid(36)](buf13, 36, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf14 = empty_strided_cuda((36, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_6[grid(36)](buf14, 36, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf15 = empty_strided_cuda((36,), (1,), torch.int64)
        triton_poi_fused__to_copy_5[grid(36)](buf15, 36, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((36,), (1,), torch.int64)
        triton_poi_fused_add_clamp_6[grid(36)](buf16, 36, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf17 = empty_strided_cuda((36,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_7[grid(36)](buf17,
            36, XBLOCK=64, num_warps=1, num_stages=1)
        buf19 = empty_strided_cuda((36, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_7[grid(36)](buf19,
            36, XBLOCK=64, num_warps=1, num_stages=1)
        buf21 = empty_strided_cuda((4, 64, 36, 36), (82944, 1296, 36, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_8[grid(
            331776)](buf13, buf15, buf12, primals_7, buf16, buf17, buf14,
            buf19, buf21, 331776, XBLOCK=512, num_warps=8, num_stages=1)
        # conv4 + fused bias/ReLU.
        buf22 = extern_kernels.convolution(buf21, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 32, 38, 38), (46208, 1444, 38, 1))
        buf23 = buf22
        del buf22
        triton_poi_fused_convolution_relu_9[grid(184832)](buf23, primals_9,
            184832, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_9
        # conv5 + scalar bias add (linear output, no activation).
        buf24 = extern_kernels.convolution(buf23, primals_10, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 1, 40, 40), (1600, 1600, 40, 1))
        buf25 = buf24
        del buf24
        triton_poi_fused_convolution_10[grid(6400)](buf25, primals_11, 6400,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
        # Boolean (activation <= 0) masks saved for ReLU's backward pass.
        buf26 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_11[grid(82944)](
            buf12, primals_7, buf26, 82944, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf12
        del primals_7
        buf27 = empty_strided_cuda((4, 128, 7, 7), (6272, 49, 7, 1), torch.bool
            )
        triton_poi_fused_convolution_relu_threshold_backward_12[grid(25088)](
            buf0, primals_2, buf27, 25088, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf0
        del primals_2
    # Output first; the rest are saved tensors for autograd.
    return (buf25, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13,
        buf14, buf15, buf16, buf17, buf19, buf21, buf23, buf26, buf27)
def spectral_norm(module, init=True, std=1, bound=False):
    """Attach spectral normalization to *module*'s weight.

    When ``init`` is true the weight is re-drawn from N(0, std); any bias is
    always zeroed.  A SpectralNorm forward-pre-hook is then installed and the
    same module is returned for call-chaining.
    """
    if init:
        nn.init.normal_(module.weight, 0, std)
    bias = getattr(module, 'bias', None)
    if bias is not None:
        bias.data.zero_()
    SpectralNorm.apply(module, 'weight', bound=bound)
    return module
def get_activation(s_act):
    """Build the activation module named by *s_act*.

    Returns a freshly constructed ``nn`` module, or ``None`` for 'linear'.
    Raises ``ValueError`` for unrecognized names.
    """
    factories = {
        'relu': lambda: nn.ReLU(inplace=True),
        'sigmoid': nn.Sigmoid,
        'softplus': nn.Softplus,
        'linear': lambda: None,
        'tanh': nn.Tanh,
        'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
        'softmax': lambda: nn.Softmax(dim=1),
        'spherical': lambda: SphericalActivation(),
    }
    factory = factories.get(s_act)
    if factory is None:
        raise ValueError(f'Unexpected activation: {s_act}')
    return factory()
class SpectralNorm:
    """Forward-pre-hook implementing spectral weight normalization.

    Each forward pass runs one power-iteration step to estimate the
    largest singular value (sigma) of the flattened weight matrix, then
    exposes weight / sigma (or a softly bounded variant) to the module.
    """
    def __init__(self, name, bound=False):
        # name: attribute name of the normalized parameter (e.g. 'weight').
        # bound: if True, shrink only weights whose sigma exceeds 1.
        self.name = name
        self.bound = bound
    def compute_weight(self, module):
        """One power-iteration step; returns (normalized_weight, updated u)."""
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        size = weight.size()
        weight_mat = weight.contiguous().view(size[0], -1)
        with torch.no_grad():
            # v and u converge toward the top right/left singular vectors.
            v = weight_mat.t() @ u
            v = v / v.norm()
            u = weight_mat @ v
            u = u / u.norm()
        sigma = u @ weight_mat @ v
        if self.bound:
            # Scale by min(sigma, 1) / sigma: small-norm weights pass through.
            weight_sn = weight / (sigma + 1e-06) * torch.clamp(sigma, max=1)
        else:
            weight_sn = weight / sigma
        return weight_sn, u
    @staticmethod
    def apply(module, name, bound):
        """Replace parameter *name* with a spectrally-normalized buffer."""
        fn = SpectralNorm(name, bound)
        weight = getattr(module, name)
        del module._parameters[name]
        # Keep the raw weight as '<name>_orig'; '<name>' becomes a buffer
        # recomputed by the hook before each forward.
        module.register_parameter(name + '_orig', weight)
        input_size = weight.size(0)
        # Persistent left-singular-vector estimate for the power iteration.
        u = weight.new_empty(input_size).normal_()
        module.register_buffer(name, weight)
        module.register_buffer(name + '_u', u)
        module.register_forward_pre_hook(fn)
        return fn
    def __call__(self, module, input):
        # Refresh the normalized weight before every forward pass.
        weight_sn, u = self.compute_weight(module)
        setattr(module, self.name, weight_sn)
        setattr(module, self.name + '_u', u)
class SphericalActivation(nn.Module):
    """L2-normalize activations along dim 1, projecting onto the unit sphere."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Scale every slice along dim 1 by the reciprocal of its L2 norm.
        norms = x.norm(p=2, dim=1, keepdim=True)
        return x / norms
class DeConvNet2New(nn.Module):
    """Inductor-compiled DeConvNet2: forward() feeds the raw weights and
    input straight into the generated call() instead of the eager layers.
    """
    def __init__(self, in_chan=1, out_chan=1, nh=8, out_activation='linear',
        use_spectral_norm=False):
        """Build five transposed-conv layers; *nh* scales the filter counts."""
        super(DeConvNet2New, self).__init__()
        self.conv1 = nn.ConvTranspose2d(in_chan, nh * 16, kernel_size=4,
            bias=True)
        self.conv2 = nn.ConvTranspose2d(nh * 16, nh * 8, kernel_size=3,
            bias=True)
        self.conv3 = nn.ConvTranspose2d(nh * 8, nh * 8, kernel_size=3, bias
            =True)
        self.conv4 = nn.ConvTranspose2d(nh * 8, nh * 4, kernel_size=3, bias
            =True)
        self.conv5 = nn.ConvTranspose2d(nh * 4, out_chan, kernel_size=3,
            bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        # NOTE(review): out_activation is constructed but the compiled
        # forward below never applies it -- confirm 'linear' is the only
        # intended setting for this variant.
        self.out_activation = get_activation(out_activation)
        if use_spectral_norm:
            self.conv1 = spectral_norm(self.conv1)
            self.conv2 = spectral_norm(self.conv2)
            self.conv3 = spectral_norm(self.conv3)
            self.conv4 = spectral_norm(self.conv4)
            self.conv5 = spectral_norm(self.conv5)
    def forward(self, input_0):
        # Gather parameters in the exact positional order call() expects.
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.conv5.weight
        primals_11 = self.conv5.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        # call() returns (result, *saved_tensors); only the result is exposed.
        return output[0]
|
Neural-Diffusion-Research/normalized-autoencoders
|
DeConvNet2
| false
| 8,637
|
[
"MIT"
] | 30
|
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
https://github.com/Neural-Diffusion-Research/normalized-autoencoders/tree/0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
DeConvNet3
|
import torch
import torch.nn as nn
def get_activation(s_act):
    """Build the activation module named by *s_act*.

    Returns a freshly constructed ``nn`` module, or ``None`` for 'linear'.
    Raises ``ValueError`` for unrecognized names.
    """
    factories = {
        'relu': lambda: nn.ReLU(inplace=True),
        'sigmoid': nn.Sigmoid,
        'softplus': nn.Softplus,
        'linear': lambda: None,
        'tanh': nn.Tanh,
        'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
        'softmax': lambda: nn.Softmax(dim=1),
        'spherical': lambda: SphericalActivation(),
    }
    factory = factories.get(s_act)
    if factory is None:
        raise ValueError(f'Unexpected activation: {s_act}')
    return factory()
class SphericalActivation(nn.Module):
    """L2-normalize activations along dim 1, projecting onto the unit sphere."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Scale every slice along dim 1 by the reciprocal of its L2 norm.
        norms = x.norm(p=2, dim=1, keepdim=True)
        return x / norms
class DeConvNet3(nn.Module):
    """Transposed-conv decoder: an 8x8 'fc' deconv followed by two stride-2
    upsampling deconvs and a 1x1 projection, with optional GroupNorm after
    each stage.
    """
    def __init__(self, in_chan=1, out_chan=1, nh=32, out_activation=
        'linear', activation='relu', num_groups=None):
        """Build the layer stack; *nh* scales the conv filter counts and
        *num_groups* (if not None) enables GroupNorm between stages."""
        super(DeConvNet3, self).__init__()
        self.num_groups = num_groups
        self.fc1 = nn.ConvTranspose2d(in_chan, nh * 32, kernel_size=8, bias
            =True)
        self.conv1 = nn.ConvTranspose2d(nh * 32, nh * 16, kernel_size=4,
            stride=2, padding=1, bias=True)
        self.conv2 = nn.ConvTranspose2d(nh * 16, nh * 8, kernel_size=4,
            stride=2, padding=1, bias=True)
        self.conv3 = nn.ConvTranspose2d(nh * 8, out_chan, kernel_size=1,
            bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        # Interleave optional norm layers and activations between convs.
        layers = [self.fc1]
        layers += [] if self.num_groups is None else [self.get_norm_layer(
            nh * 32)]
        layers += [get_activation(activation), self.conv1]
        layers += [] if self.num_groups is None else [self.get_norm_layer(
            nh * 16)]
        layers += [get_activation(activation), self.conv2]
        layers += [] if self.num_groups is None else [self.get_norm_layer(
            nh * 8)]
        layers += [get_activation(activation), self.conv3]
        out_activation = get_activation(out_activation)
        if out_activation is not None:
            layers.append(out_activation)
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        """Run the full sequential stack on *x*."""
        return self.net(x)
    def get_norm_layer(self, num_channels):
        """Return a GroupNorm over *num_channels*, or None if disabled."""
        if self.num_groups is not None:
            return nn.GroupNorm(num_groups=self.num_groups, num_channels=
                num_channels)
        else:
            return None
def get_inputs():
    """Return a single random example batch shaped (4, 1, 4, 4)."""
    batch = torch.rand([4, 1, 4, 4])
    return [batch]
def get_init_inputs():
    """Return the (args, kwargs) pair used to construct the module under test."""
    return [list(), dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place per-channel bias add + ReLU.

    x1 = (xindex // 121) % 1024: an 11x11 spatial map with 1024 channels,
    matching the fc1 output at the call site.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 121 % 1024
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place per-channel bias add + ReLU.

    x1 = (xindex // 484) % 512: a 22x22 spatial map with 512 channels,
    matching the conv1 output at the call site.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 484 % 512
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place per-channel bias add + ReLU.

    x1 = (xindex // 1936) % 256: a 44x44 spatial map with 256 channels,
    matching the conv2 output at the call site.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1936 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    """Broadcast-add a single-element bias over the whole buffer, in place.

    xnumel = 7744 = 4 * 1 * 44 * 44: the one-channel final conv output.
    No ReLU -- this is the network's linear output layer.
    """
    xnumel = 7744
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
    """Inductor-compiled forward pass for DeConvNet3New (CUDA only).

    args holds, in order: fc1.weight, fc1.bias, the input tensor, then
    weight/bias pairs for conv1..conv3.  Each stage is an extern
    (cuDNN-backed) transposed convolution followed by a fused Triton
    bias + ReLU kernel; the last stage adds only the scalar bias.
    Returns the output first, then tensors saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (1, 1024, 8, 8), (65536, 64, 8, 1))
    assert_size_stride(primals_2, (1024,), (1,))
    assert_size_stride(primals_3, (4, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_4, (1024, 512, 4, 4), (8192, 16, 4, 1))
    assert_size_stride(primals_5, (512,), (1,))
    assert_size_stride(primals_6, (512, 256, 4, 4), (4096, 16, 4, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (256, 1, 1, 1), (1, 1, 1, 1))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # fc1: 8x8 deconv -> (4, 1024, 11, 11), then fused bias + ReLU.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1024, 11, 11), (123904, 121, 11, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(495616)](buf1, primals_2,
            495616, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        # conv1: stride-2 upsample to 22x22, fused bias + ReLU.
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 512, 22, 22), (247808, 484, 22, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_1[grid(991232)](buf3, primals_5,
            991232, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        # conv2: stride-2 upsample to 44x44, fused bias + ReLU.
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 256, 44, 44), (495616, 1936, 44, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(1982464)](buf5, primals_7,
            1982464, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        # conv3: 1x1 projection to one channel + scalar bias (linear output).
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 1, 44, 44), (1936, 1936, 44, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_3[grid(7744)](buf7, primals_9, 7744,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_9
    # Output first; the rest are saved tensors for autograd.
    return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
        buf1, buf3, buf5)
def get_activation(s_act):
    """Build the activation module named by *s_act*.

    Returns a freshly constructed ``nn`` module, or ``None`` for 'linear'.
    Raises ``ValueError`` for unrecognized names.
    """
    factories = {
        'relu': lambda: nn.ReLU(inplace=True),
        'sigmoid': nn.Sigmoid,
        'softplus': nn.Softplus,
        'linear': lambda: None,
        'tanh': nn.Tanh,
        'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
        'softmax': lambda: nn.Softmax(dim=1),
        'spherical': lambda: SphericalActivation(),
    }
    factory = factories.get(s_act)
    if factory is None:
        raise ValueError(f'Unexpected activation: {s_act}')
    return factory()
class SphericalActivation(nn.Module):
    """L2-normalize activations along dim 1, projecting onto the unit sphere."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Scale every slice along dim 1 by the reciprocal of its L2 norm.
        norms = x.norm(p=2, dim=1, keepdim=True)
        return x / norms
class DeConvNet3New(nn.Module):
    """Inductor-compiled DeConvNet3: forward() feeds the raw weights and
    input straight into the generated call() instead of running self.net.
    """
    def __init__(self, in_chan=1, out_chan=1, nh=32, out_activation=
        'linear', activation='relu', num_groups=None):
        """Build the layer stack; *nh* scales the conv filter counts and
        *num_groups* (if not None) enables GroupNorm between stages."""
        super(DeConvNet3New, self).__init__()
        self.num_groups = num_groups
        self.fc1 = nn.ConvTranspose2d(in_chan, nh * 32, kernel_size=8, bias
            =True)
        self.conv1 = nn.ConvTranspose2d(nh * 32, nh * 16, kernel_size=4,
            stride=2, padding=1, bias=True)
        self.conv2 = nn.ConvTranspose2d(nh * 16, nh * 8, kernel_size=4,
            stride=2, padding=1, bias=True)
        self.conv3 = nn.ConvTranspose2d(nh * 8, out_chan, kernel_size=1,
            bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        # NOTE(review): self.net is assembled for parity with the eager
        # model, but the compiled forward below bypasses it entirely.
        layers = [self.fc1]
        layers += [] if self.num_groups is None else [self.get_norm_layer(
            nh * 32)]
        layers += [get_activation(activation), self.conv1]
        layers += [] if self.num_groups is None else [self.get_norm_layer(
            nh * 16)]
        layers += [get_activation(activation), self.conv2]
        layers += [] if self.num_groups is None else [self.get_norm_layer(
            nh * 8)]
        layers += [get_activation(activation), self.conv3]
        out_activation = get_activation(out_activation)
        if out_activation is not None:
            layers.append(out_activation)
        self.net = nn.Sequential(*layers)
    def get_norm_layer(self, num_channels):
        """Return a GroupNorm over *num_channels*, or None if disabled."""
        if self.num_groups is not None:
            return nn.GroupNorm(num_groups=self.num_groups, num_channels=
                num_channels)
        else:
            return None
    def forward(self, input_0):
        # Gather parameters in the exact positional order call() expects.
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.conv1.weight
        primals_5 = self.conv1.bias
        primals_6 = self.conv2.weight
        primals_7 = self.conv2.bias
        primals_8 = self.conv3.weight
        primals_9 = self.conv3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        # call() returns (result, *saved_tensors); only the result is exposed.
        return output[0]
|
Neural-Diffusion-Research/normalized-autoencoders
|
DeConvNet3
| false
| 8,638
|
[
"MIT"
] | 30
|
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
https://github.com/Neural-Diffusion-Research/normalized-autoencoders/tree/0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
ConvNet2FC
|
import torch
import torch.nn as nn
def spectral_norm(module, init=True, std=1, bound=False):
    """Attach spectral normalization to *module*'s weight.

    When ``init`` is true the weight is re-drawn from N(0, std); any bias is
    always zeroed.  A SpectralNorm forward-pre-hook is then installed and the
    same module is returned for call-chaining.
    """
    if init:
        nn.init.normal_(module.weight, 0, std)
    bias = getattr(module, 'bias', None)
    if bias is not None:
        bias.data.zero_()
    SpectralNorm.apply(module, 'weight', bound=bound)
    return module
def get_activation(s_act):
    """Build the activation module named by *s_act*.

    Returns a freshly constructed ``nn`` module, or ``None`` for 'linear'.
    Raises ``ValueError`` for unrecognized names.
    """
    factories = {
        'relu': lambda: nn.ReLU(inplace=True),
        'sigmoid': nn.Sigmoid,
        'softplus': nn.Softplus,
        'linear': lambda: None,
        'tanh': nn.Tanh,
        'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
        'softmax': lambda: nn.Softmax(dim=1),
        'spherical': lambda: SphericalActivation(),
    }
    factory = factories.get(s_act)
    if factory is None:
        raise ValueError(f'Unexpected activation: {s_act}')
    return factory()
class SpectralNorm:
    """Forward-pre-hook implementing spectral weight normalization.

    Each forward pass runs one power-iteration step to estimate the
    largest singular value (sigma) of the flattened weight matrix, then
    exposes weight / sigma (or a softly bounded variant) to the module.
    """
    def __init__(self, name, bound=False):
        # name: attribute name of the normalized parameter (e.g. 'weight').
        # bound: if True, shrink only weights whose sigma exceeds 1.
        self.name = name
        self.bound = bound
    def compute_weight(self, module):
        """One power-iteration step; returns (normalized_weight, updated u)."""
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        size = weight.size()
        weight_mat = weight.contiguous().view(size[0], -1)
        with torch.no_grad():
            # v and u converge toward the top right/left singular vectors.
            v = weight_mat.t() @ u
            v = v / v.norm()
            u = weight_mat @ v
            u = u / u.norm()
        sigma = u @ weight_mat @ v
        if self.bound:
            # Scale by min(sigma, 1) / sigma: small-norm weights pass through.
            weight_sn = weight / (sigma + 1e-06) * torch.clamp(sigma, max=1)
        else:
            weight_sn = weight / sigma
        return weight_sn, u
    @staticmethod
    def apply(module, name, bound):
        """Replace parameter *name* with a spectrally-normalized buffer."""
        fn = SpectralNorm(name, bound)
        weight = getattr(module, name)
        del module._parameters[name]
        # Keep the raw weight as '<name>_orig'; '<name>' becomes a buffer
        # recomputed by the hook before each forward.
        module.register_parameter(name + '_orig', weight)
        input_size = weight.size(0)
        # Persistent left-singular-vector estimate for the power iteration.
        u = weight.new_empty(input_size).normal_()
        module.register_buffer(name, weight)
        module.register_buffer(name + '_u', u)
        module.register_forward_pre_hook(fn)
        return fn
    def __call__(self, module, input):
        # Refresh the normalized weight before every forward pass.
        weight_sn, u = self.compute_weight(module)
        setattr(module, self.name, weight_sn)
        setattr(module, self.name + '_u', u)
class SphericalActivation(nn.Module):
    """L2-normalize activations along dim 1, projecting onto the unit sphere."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Scale every slice along dim 1 by the reciprocal of its L2 norm.
        norms = x.norm(p=2, dim=1, keepdim=True)
        return x / norms
class ConvNet2FC(nn.Module):
    """Conv encoder with an additional 1x1 conv layer at the top.

    Four 3x3 convs (ReLU, with 2x2 max-pooling after each pair), a 4x4 conv
    acting as an MLP head, and a 1x1 conv projecting to *out_chan*.
    """
    def __init__(self, in_chan=1, out_chan=64, nh=8, nh_mlp=512,
        out_activation='linear', use_spectral_norm=False):
        """Build the conv stack; *nh* scales the conv filter counts and
        *nh_mlp* sizes the 4x4 head layer."""
        super(ConvNet2FC, self).__init__()
        self.conv1 = nn.Conv2d(in_chan, nh * 4, kernel_size=3, bias=True)
        self.conv2 = nn.Conv2d(nh * 4, nh * 8, kernel_size=3, bias=True)
        self.max1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(nh * 8, nh * 8, kernel_size=3, bias=True)
        self.conv4 = nn.Conv2d(nh * 8, nh * 16, kernel_size=3, bias=True)
        self.max2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5 = nn.Conv2d(nh * 16, nh_mlp, kernel_size=4, bias=True)
        self.conv6 = nn.Conv2d(nh_mlp, out_chan, kernel_size=1, bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        self.out_activation = get_activation(out_activation)
        # NOTE(review): conv6 is left un-normalized even when
        # use_spectral_norm is True -- confirm this is intentional.
        if use_spectral_norm:
            self.conv1 = spectral_norm(self.conv1)
            self.conv2 = spectral_norm(self.conv2)
            self.conv3 = spectral_norm(self.conv3)
            self.conv4 = spectral_norm(self.conv4)
            self.conv5 = spectral_norm(self.conv5)
        layers = [self.conv1, nn.ReLU(), self.conv2, nn.ReLU(), self.max1,
            self.conv3, nn.ReLU(), self.conv4, nn.ReLU(), self.max2, self.
            conv5, nn.ReLU(), self.conv6]
        if self.out_activation is not None:
            layers.append(self.out_activation)
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        """Run the full sequential stack on *x*."""
        return self.net(x)
def get_inputs():
    """Return a single random example batch shaped (4, 1, 64, 64)."""
    batch = torch.rand([4, 1, 64, 64])
    return [batch]
def get_init_inputs():
    """Return the (args, kwargs) pair used to construct the module under test."""
    return [list(), dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Layout repack of a 3x3 kernel tensor (9 taps, 32-channel inner dim).

    Reads contiguous in[x2 + 9*y3] and writes out[y0 + 32*x2 + 288*y1],
    moving the 32-channel axis innermost -- presumably a channels-last
    repack prepared for the convolution kernels; generated by inductor.
    """
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Layout repack of a 3x3 kernel tensor (9 taps, 64-channel inner dim).

    Same transform as triton_poi_fused_0 with a 64-wide channel axis:
    in[x2 + 9*y3] -> out[y0 + 64*x2 + 576*y1].
    """
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Layout repack of a 3x3 kernel tensor (9 taps, 64-channel inner dim).

    Identical transform to triton_poi_fused_1; inductor emits one copy per
    distinct call site: in[x2 + 9*y3] -> out[y0 + 64*x2 + 576*y1].
    """
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Weight repacking for a 4x4 kernel (the (512, 128, 4, 4) conv5 weight at
    # the call site): 16 spatial taps, 128 input channels, output strides
    # (2048, 1, 512, 128).  The y grid is split over two program axes because
    # the extent (65536) exceeds a single launch dimension here.
    xnumel = 16  # 4*4 spatial taps
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y extent exact; no y mask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex  # spatial tap (0..15)
    y3 = yindex  # flattened channel pair
    y0 = yindex % 128  # input channel
    y1 = yindex // 128  # output channel
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU over the conv1 output: reads the (4, 32, 62, 62)
    # contiguous conv result (in_ptr0) and the 32-element bias (in_ptr1),
    # writes relu(x + bias) into a channel-minor layout (out stride 32 per
    # spatial step, 123008 per batch).
    ynumel = 128  # batch * channels = 4 * 32
    xnumel = 3844  # spatial extent = 62 * 62
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex  # spatial position
    y3 = yindex  # flattened (batch, channel)
    y0 = yindex % 32  # channel
    y1 = yindex // 32  # batch
    tmp0 = tl.load(in_ptr0 + (x2 + 3844 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add per-channel bias
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(out_ptr0 + (y0 + 32 * x2 + 123008 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU: x = relu(x + bias[channel]), where the
    # channel is the fastest-moving 64-wide component of the flat index.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # total extent is a multiple of XBLOCK; no mask
    x2 = xindex
    x0 = xindex % 64  # channel index into the bias vector
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add per-channel bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2/stride-2 max pooling over a channel-minor (64-channel) layout.
    # Writes the max value (out_ptr0) and the int8 argmax index 0..3 within
    # the 2x2 window (out_ptr1), matching max_pool2d_with_indices.
    xnumel = 230400  # 4 batches * 64 channels * 30 * 30 outputs
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64  # channel
    x1 = xindex // 64 % 30  # output column
    x2 = xindex // 1920  # (batch, output row) combined
    x3 = xindex
    # The four corners of the 2x2 input window (row stride 3840 = 60 cols * 64 ch).
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7680 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (3840 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (3904 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which of the four window positions produced the running max.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU for a 64-channel tensor (same pattern as
    # triton_poi_fused_convolution_relu_5, different launch extent).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # extent is a multiple of XBLOCK; no mask
    x2 = xindex
    x0 = xindex % 64  # channel index into the bias vector
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add per-channel bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU for a 128-channel tensor.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # extent is a multiple of XBLOCK; no mask
    x2 = xindex
    x0 = xindex % 128  # channel index into the bias vector
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add per-channel bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2/stride-2 max pooling with argmax indices over a channel-minor
    # 128-channel layout (26x26 -> 13x13); same structure as
    # triton_poi_fused_max_pool2d_with_indices_6.
    xnumel = 86528  # 4 batches * 128 channels * 13 * 13 outputs
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 128  # channel
    x1 = xindex // 128 % 13  # output column
    x2 = xindex // 1664  # (batch, output row) combined
    x3 = xindex
    # Four corners of the 2x2 window (row stride 3328 = 26 cols * 128 ch).
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 6656 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 6656 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (3328 + x0 + 256 * x1 + 6656 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (3456 + x0 + 256 * x1 + 6656 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Running argmax over the four window positions, encoded as int8 0..3.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU for a 512-channel tensor.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # extent is a multiple of XBLOCK; no mask
    x2 = xindex
    x0 = xindex % 512  # channel index into the bias vector
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add per-channel bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_11(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Final bias-add that also converts the 1x1-conv output from the
    # channel-minor layout back to standard contiguous NCHW (no ReLU).
    ynumel = 256  # batch * channels = 4 * 64
    xnumel = 100  # spatial extent = 10 * 10
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex  # spatial position
    y0 = yindex % 64  # channel
    y1 = yindex // 64  # batch
    y3 = yindex  # flattened (batch, channel)
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 6400 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add per-channel bias
    tl.store(out_ptr0 + (x2 + 100 * y3), tmp2, xmask & ymask)
def call(args):
    """Compiled forward graph for ConvNet2FCNew.

    `args` holds 13 "primals": the six conv weights/biases interleaved with
    the (4, 1, 64, 64) input (primals_3).  Returns the network output first,
    followed by the tensors the autograd backward pass needs.  The list is
    cleared in place so the caller drops its references.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (512, 128, 4, 4), (2048, 16, 4, 1))
    assert_size_stride(primals_11, (512,), (1,))
    assert_size_stride(primals_12, (64, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_13, (64,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- Repack conv2..conv5 weights into channel-minor layouts. ---
        buf0 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
            float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(2048, 9)](primals_4, buf0, 2048, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
            float32)
        triton_poi_fused_1[grid(4096, 9)](primals_6, buf1, 4096, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
            .float32)
        triton_poi_fused_2[grid(8192, 9)](primals_8, buf2, 8192, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf3 = empty_strided_cuda((512, 128, 4, 4), (2048, 1, 512, 128),
            torch.float32)
        triton_poi_fused_3[grid(65536, 16)](primals_10, buf3, 65536, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_10
        # --- conv1 (3x3) + bias + ReLU, writing a channel-minor buffer. ---
        buf4 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 62, 62), (123008, 3844, 62, 1))
        buf5 = empty_strided_cuda((4, 32, 62, 62), (123008, 1, 1984, 32),
            torch.float32)
        triton_poi_fused_convolution_relu_4[grid(128, 3844)](buf4,
            primals_2, buf5, 128, 3844, XBLOCK=8, YBLOCK=128, num_warps=4,
            num_stages=1)
        del buf4
        del primals_2
        # --- conv2 (3x3) + bias + ReLU (in place). ---
        buf6 = extern_kernels.convolution(buf5, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 64, 60, 60), (230400, 1, 3840, 64))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_5[grid(921600)](buf7, primals_5,
            921600, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        # --- 2x2 max pool (values + argmax indices for backward). ---
        buf8 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
            torch.float32)
        buf9 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_6[grid(230400)](buf7, buf8,
            buf9, 230400, XBLOCK=512, num_warps=8, num_stages=1)
        # --- conv3 (3x3) + bias + ReLU (in place). ---
        buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 64, 28, 28), (50176, 1, 1792, 64))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_relu_7[grid(200704)](buf11, primals_7,
            200704, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        # --- conv4 (3x3) + bias + ReLU (in place). ---
        buf12 = extern_kernels.convolution(buf11, buf2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 128, 26, 26), (86528, 1, 3328, 128))
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_relu_8[grid(346112)](buf13, primals_9,
            346112, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        # --- second 2x2 max pool. ---
        buf14 = empty_strided_cuda((4, 128, 13, 13), (21632, 1, 1664, 128),
            torch.float32)
        buf15 = empty_strided_cuda((4, 128, 13, 13), (21632, 1, 1664, 128),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_9[grid(86528)](buf13,
            buf14, buf15, 86528, XBLOCK=512, num_warps=8, num_stages=1)
        # --- conv5 (4x4) + bias + ReLU (in place). ---
        buf16 = extern_kernels.convolution(buf14, buf3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 512, 10, 10), (51200, 1, 5120, 512))
        buf17 = buf16
        del buf16
        triton_poi_fused_convolution_relu_10[grid(204800)](buf17,
            primals_11, 204800, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_11
        # --- conv6 (1x1) + bias; result converted back to contiguous NCHW. ---
        buf18 = extern_kernels.convolution(buf17, primals_12, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 64, 10, 10), (6400, 1, 640, 64))
        buf19 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1),
            torch.float32)
        triton_poi_fused_convolution_11[grid(256, 100)](buf18, primals_13,
            buf19, 256, 100, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1)
        del buf18
        del primals_13
    # buf19 is the network output; the rest are saved for backward.
    return (buf19, primals_1, primals_3, buf0, buf1, buf2, buf3, primals_12,
        buf5, buf7, buf8, buf9, buf11, buf13, buf14, buf15, buf17)
def spectral_norm(module, init=True, std=1, bound=False):
    """Attach spectral normalization to *module*'s weight and return the module.

    When *init* is true, the weight is re-drawn from N(0, std) and the bias
    (if the module has one) is zeroed before the hook is installed.
    """
    if init:
        nn.init.normal_(module.weight, 0, std)
        bias = getattr(module, 'bias', None)
        if bias is not None:
            bias.data.zero_()
    SpectralNorm.apply(module, 'weight', bound=bound)
    return module
def get_activation(s_act):
    """Return a freshly-constructed activation module for the name *s_act*.

    'linear' maps to None (no activation); unknown names raise ValueError.
    """
    # Factories are wrapped in callables so nothing is instantiated (and no
    # sibling class is looked up) until the chosen entry is invoked.
    factories = {
        'relu': lambda: nn.ReLU(inplace=True),
        'sigmoid': nn.Sigmoid,
        'softplus': nn.Softplus,
        'linear': lambda: None,
        'tanh': nn.Tanh,
        'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
        'softmax': lambda: nn.Softmax(dim=1),
        'spherical': lambda: SphericalActivation(),
    }
    if s_act not in factories:
        raise ValueError(f'Unexpected activation: {s_act}')
    return factories[s_act]()
class SpectralNorm:
    """Forward-pre-hook that rescales a parameter by its spectral norm.

    One power-iteration step per forward pass estimates the largest singular
    value sigma of the weight (viewed as a 2-D matrix); the weight exposed to
    the module is weight / sigma (or a bounded variant, see compute_weight).
    State stored on the module: `<name>_orig` (the raw parameter),
    `<name>_u` (the left singular vector estimate) and `<name>` (the
    normalized buffer).
    """

    def __init__(self, name, bound=False):
        self.name = name
        self.bound = bound  # if True, only shrink weights with sigma > 1

    def compute_weight(self, module):
        """Return (normalized weight, updated u) after one power iteration."""
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        size = weight.size()
        # Flatten to (out_features, everything_else) for the matrix norm.
        weight_mat = weight.contiguous().view(size[0], -1)
        with torch.no_grad():
            # One power-iteration step: v then u, both renormalized.
            v = weight_mat.t() @ u
            v = v / v.norm()
            u = weight_mat @ v
            u = u / u.norm()
        sigma = u @ weight_mat @ v  # Rayleigh estimate of the top singular value
        if self.bound:
            # Leave weights with sigma <= 1 (almost) untouched; only shrink
            # those whose spectral norm exceeds 1.
            weight_sn = weight / (sigma + 1e-06) * torch.clamp(sigma, max=1)
        else:
            weight_sn = weight / sigma
        return weight_sn, u

    @staticmethod
    def apply(module, name, bound):
        """Install the hook on *module*, moving the parameter to `<name>_orig`."""
        fn = SpectralNorm(name, bound)
        weight = getattr(module, name)
        del module._parameters[name]
        module.register_parameter(name + '_orig', weight)
        input_size = weight.size(0)
        # Random initial estimate of the left singular vector.
        u = weight.new_empty(input_size).normal_()
        module.register_buffer(name, weight)
        module.register_buffer(name + '_u', u)
        module.register_forward_pre_hook(fn)
        return fn

    def __call__(self, module, input):
        # Runs before every forward: refresh the normalized weight and u.
        weight_sn, u = self.compute_weight(module)
        setattr(module, self.name, weight_sn)
        setattr(module, self.name + '_u', u)
class SphericalActivation(nn.Module):
    """Project each sample onto the unit L2 sphere along dimension 1."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        norms = x.norm(p=2, dim=1, keepdim=True)
        return x / norms
class ConvNet2FCNew(nn.Module):
    """additional 1x1 conv layer at the top"""

    def __init__(self, in_chan=1, out_chan=64, nh=8, nh_mlp=512,
        out_activation='linear', use_spectral_norm=False):
        """nh: determines the numbers of conv filters"""
        super(ConvNet2FCNew, self).__init__()
        # Backbone: 3x3 convs interleaved with 2x2 max-pools, a 4x4 "MLP"
        # conv, then a final 1x1 projection to out_chan.
        self.conv1 = nn.Conv2d(in_chan, nh * 4, kernel_size=3, bias=True)
        self.conv2 = nn.Conv2d(nh * 4, nh * 8, kernel_size=3, bias=True)
        self.max1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(nh * 8, nh * 8, kernel_size=3, bias=True)
        self.conv4 = nn.Conv2d(nh * 8, nh * 16, kernel_size=3, bias=True)
        self.max2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5 = nn.Conv2d(nh * 16, nh_mlp, kernel_size=4, bias=True)
        self.conv6 = nn.Conv2d(nh_mlp, out_chan, kernel_size=1, bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        self.out_activation = get_activation(out_activation)
        if use_spectral_norm:
            # Spectral-normalize every conv except the final projection.
            for attr in ('conv1', 'conv2', 'conv3', 'conv4', 'conv5'):
                setattr(self, attr, spectral_norm(getattr(self, attr)))
        layers = [self.conv1, nn.ReLU(), self.conv2, nn.ReLU(), self.max1,
            self.conv3, nn.ReLU(), self.conv4, nn.ReLU(), self.max2,
            self.conv5, nn.ReLU(), self.conv6]
        if self.out_activation is not None:
            layers.append(self.out_activation)
        self.net = nn.Sequential(*layers)

    def forward(self, input_0):
        # Feed the compiled graph its expected "primals" order:
        # conv1.w, conv1.b, input, then (weight, bias) for conv2..conv6.
        convs = (self.conv1, self.conv2, self.conv3, self.conv4, self.
            conv5, self.conv6)
        params = [convs[0].weight, convs[0].bias, input_0]
        for conv in convs[1:]:
            params.append(conv.weight)
            params.append(conv.bias)
        output = call(params)
        return output[0]
|
Neural-Diffusion-Research/normalized-autoencoders
|
ConvNet2FC
| false
| 8,639
|
[
"MIT"
] | 30
|
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
https://github.com/Neural-Diffusion-Research/normalized-autoencoders/tree/0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
FixupResUnit
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class FixupResUnit(nn.Module):
    """Residual unit in the Fixup-initialization style: two bias-free 3x3
    convs wrapped with learned scalar biases and a scale instead of batch
    norm, plus a 1x1 projection shortcut when shape or stride changes."""

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.bias1a = nn.Parameter(torch.zeros(1))
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1,
            stride=stride, bias=False)
        self.bias1b = nn.Parameter(torch.zeros(1))
        self.bias2a = nn.Parameter(torch.zeros(1))
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1,
            bias=False)
        self.scale = nn.Parameter(torch.ones(1))
        self.bias2b = nn.Parameter(torch.zeros(1))
        needs_projection = in_channels != out_channels or stride != 1
        if needs_projection:
            self.shortcut = nn.Conv2d(in_channels, out_channels, 1,
                stride=stride, bias=False)
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        h = F.relu(x)
        h = self.conv1(h + self.bias1a) + self.bias1b
        h = F.relu(h)
        h = self.conv2(h + self.bias2a)
        h = h * self.scale + self.bias2b
        return self.shortcut(x) + h
def get_inputs():
    """Example forward inputs for tracing: one random NCHW batch."""
    batch = torch.rand([4, 4, 4, 4])
    return [batch]
def get_init_inputs():
    """Example constructor (args, kwargs) for tracing."""
    kwargs = {'in_channels': 4, 'out_channels': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Fused relu(x) + bias1a: in_ptr1 is a single learned scalar broadcast
    # over all 256 elements.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + 0)  # scalar bias
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)  # ReLU first ...
    tmp5 = tmp2 + tmp4  # ... then add the scalar bias
    tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused relu(conv1_out + bias1b) + bias2a.  Also emits the boolean mask
    # "relu output <= 0" (out_ptr1), saved for the ReLU backward pass.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)  # scalar bias1b
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp6 = tl.load(in_ptr2 + 0)  # scalar bias2a
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)  # ReLU
    tmp8 = tmp5 + tmp7
    tmp9 = 0.0
    tmp10 = tmp5 <= tmp9  # dead-unit mask for backward
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Residual merge: out = shortcut (in_ptr0) + conv2_out (in_ptr1) * scale
    # (in_ptr2, scalar) + bias2b (in_ptr3, scalar).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tl.load(in_ptr2 + 0)  # scalar scale
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp5 = tl.load(in_ptr3 + 0)  # scalar bias2b
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp4 = tmp1 * tmp3
    tmp7 = tmp4 + tmp6
    tmp8 = tmp0 + tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
    """Compiled forward graph for FixupResUnitNew (identity-shortcut case).

    `args` = (input, bias1a, conv1.weight, bias1b, bias2a, conv2.weight,
    scale, bias2b).  Returns the residual-unit output first, followed by
    the tensors saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (1,), (1,))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # relu(x) + bias1a
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_relu_0[grid(256)](primals_1, primals_2, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        # conv1, then relu(. + bias1b) + bias2a (buf5 holds the ReLU mask).
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf1,
            primals_4, primals_5, buf2, buf5, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_4
        del primals_5
        # conv2, then x + (conv2_out * scale + bias2b); buf1 is reused.
        buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf1
        del buf1
        triton_poi_fused_add_mul_2[grid(256)](primals_1, buf3, primals_7,
            primals_8, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_8
    # buf4 is the output; the rest are saved for backward.
    return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5
class FixupResUnitNew(nn.Module):
    """Fixup residual unit whose forward runs the compiled inductor graph.

    Same parameters as FixupResUnit; forward packs them in the "primals"
    order expected by `call`.  NOTE(review): the compiled `call` asserts
    fixed (4, 4, 4, 4) shapes and an identity shortcut, so this class is
    only valid for in_channels == out_channels == 4, stride == 1 inputs of
    that exact shape — confirm against the capture configuration.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        # Learned scalar biases/scale standing in for batch norm (Fixup).
        self.bias1a = nn.Parameter(torch.zeros(1))
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1,
            stride=stride, bias=False)
        self.bias1b = nn.Parameter(torch.zeros(1))
        self.bias2a = nn.Parameter(torch.zeros(1))
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1,
            bias=False)
        self.scale = nn.Parameter(torch.ones(1))
        self.bias2b = nn.Parameter(torch.zeros(1))
        if in_channels != out_channels or stride != 1:
            # 1x1 projection when the residual shape changes.
            self.shortcut = nn.Conv2d(in_channels, out_channels, 1, stride=
                stride, bias=False)
        else:
            self.shortcut = nn.Identity()

    def forward(self, input_0):
        # Pack parameters in the compiled graph's primals order:
        # input, bias1a, conv1.w, bias1b, bias2a, conv2.w, scale, bias2b.
        primals_2 = self.bias1a
        primals_4 = self.bias1b
        primals_5 = self.bias2a
        primals_7 = self.scale
        primals_8 = self.bias2b
        primals_3 = self.conv1.weight
        primals_6 = self.conv2.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
|
OpenXAIProject/dac
|
FixupResUnit
| false
| 8,640
|
[
"MIT"
] | 17
|
652776e21b56dcb68839363bb077d5c5ea28d81e
|
https://github.com/OpenXAIProject/dac/tree/652776e21b56dcb68839363bb077d5c5ea28d81e
|
Encoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention """

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: optional scaling factor (the paper uses sqrt(dim_K))
        Return:
            the attention-weighted context tensor
        """
        scores = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            scores = scores * scale
        weights = F.softmax(scores, dim=-1)
        context = torch.matmul(weights, V)
        return context
class Multi_Head_Attention(nn.Module):
    """Multi-head self-attention with a residual connection and LayerNorm.

    dim_model must be divisible by num_head; each head attends over
    dim_model // num_head features.
    """

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        # Project, then fold the head axis into the batch axis so a single
        # batched matmul handles all heads at once.
        Q = self.fc_Q(x).view(batch_size * self.num_head, -1, self.dim_head)
        K = self.fc_K(x).view(batch_size * self.num_head, -1, self.dim_head)
        V = self.fc_V(x).view(batch_size * self.num_head, -1, self.dim_head)
        scale = K.size(-1) ** -0.5  # 1/sqrt(dim_head)
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.dropout(self.fc(context))
        # Residual connection followed by LayerNorm.
        return self.layer_norm(out + x)
class Position_wise_Feed_Forward(nn.Module):
    """Two-layer position-wise MLP with a residual connection and LayerNorm."""

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        out = self.dropout(self.fc2(hidden))
        # Residual connection followed by LayerNorm.
        return self.layer_norm(out + x)
class Encoder(nn.Module):
    """Transformer encoder block: multi-head self-attention followed by a
    position-wise feed-forward sublayer (each with its own residual +
    LayerNorm)."""

    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden,
            dropout)

    def forward(self, x):
        return self.feed_forward(self.attention(x))
def get_inputs():
    """Example forward input for tracing: one random (batch, dim) tensor."""
    sample = torch.rand([4, 4])
    return [sample]
def get_init_inputs():
    """Example constructor (args, kwargs) for tracing."""
    cfg = {'dim_model': 4, 'num_head': 4, 'hidden': 4, 'dropout': 0.5}
    return [[], cfg]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Degenerate softmax over a length-1 row: subtracting the (single) max
    # from itself gives 0, exp(0) = 1, and 1/1 = 1, so every element becomes
    # 1.0.  The arithmetic is kept in this shape because it is the generic
    # numerically-stable softmax specialized by the compiler to row length 1.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1  # scale factor folded to 1.0
    tmp3 = tmp2 - tmp2  # x - max(x) where the row has one element
    tmp4 = tmp3 * tmp1
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5 / tmp5  # normalize by the row sum (itself)
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics for the residual sum in_ptr0 + in_ptr1: for each
    # output position, unrolls the 4 features, computing their mean
    # (out_ptr0) and biased variance (out_ptr1).  Normalization itself
    # happens in the next kernel.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4  # row selector into in_ptr0
    x0 = xindex % 4  # row selector into in_ptr1
    x2 = xindex
    # Load the 4 features of each operand's row and add them pairwise.
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15  # mean over the 4 features
    # Biased variance: mean of squared deviations.
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Applies the LayerNorm using the mean (in_ptr2) and variance (in_ptr3)
    # produced by the previous kernel:
    #   out = (x + residual - mean) * rsqrt(var + 1e-5) * gamma + beta
    # where gamma/beta are the per-feature affine parameters (in_ptr4/5).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4  # feature index
    x2 = xindex // 16
    x3 = xindex % 16
    x4 = xindex // 4  # row index for the statistics
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
        )
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # residual add
    tmp4 = tmp2 - tmp3  # center
    tmp6 = 1e-05  # LayerNorm eps
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8  # normalize
    tmp11 = tmp9 * tmp10  # scale (gamma)
    tmp13 = tmp11 + tmp12  # shift (beta)
    tl.store(out_ptr0 + x5, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU for the feed-forward hidden layer.
    # Also writes the "output <= 0" mask (out_ptr0) for the ReLU backward.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index into the bias vector
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # dead-unit mask for backward
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    # In-place fused bias-add + residual add:
    #   x = x + bias[feature] + residual
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index into the bias vector
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)  # residual input
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics over 4 contiguous features per row: writes the
    # mean (out_ptr0) and rsqrt(var + 1e-5) (out_ptr1).  Unlike kernel 1,
    # the reciprocal root is precomputed here.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex  # row index
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7  # mean over the 4 features
    # Biased variance: mean of squared deviations.
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05  # LayerNorm eps
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Applies LayerNorm using the per-row mean (in_ptr1) and precomputed
    # rsqrt(var + eps) (in_ptr2) from the previous kernel:
    #   out = (x - mean) * rstd * gamma + beta
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index for the statistics
    x0 = xindex % 4  # feature index for gamma/beta
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1  # center
    tmp4 = tmp2 * tmp3  # normalize
    tmp6 = tmp4 * tmp5  # scale (gamma)
    tmp8 = tmp6 + tmp7  # shift (beta)
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    # Compiled Transformer-encoder forward: self-attention sublayer
    # (three linear projections, softmax attention, output projection,
    # residual + LayerNorm), then the position-wise FFN sublayer
    # (linear -> ReLU -> linear, residual + LayerNorm).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (4, 4), (4, 1))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Three linear projections (addmm: bias + x @ W^T) into buf0..buf2.
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
            primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
            primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
            primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        # Attention logits via batched matmul, then fused softmax in place.
        buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1),
            0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3)
        buf4 = buf3
        del buf3
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # Attention-weighted values, then the output projection (addmm).
        buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1,
            1), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4,
            1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha
            =1, beta=1, out=buf6)
        del primals_9
        # Residual add + LayerNorm #1: buf7/buf8 hold mean and rstd,
        # buf9 holds the normalized, scaled, shifted activations.
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1,
            buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1,
            buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_11
        # FFN first linear, then fused ReLU; buf17 keeps the (x <= 0)
        # mask needed by the backward pass.
        buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10)
        buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
        del buf10
        buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(64)](buf11,
            primals_13, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_13
        # FFN second linear plus fused bias + residual add (in buf13).
        buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf12)
        buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
        del buf12
        triton_poi_fused_add_4[grid(64)](buf13, primals_15, buf9, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_15
        # LayerNorm #2, reusing buf7/buf8's storage for mean/rstd.
        buf14 = buf8
        del buf8
        buf15 = buf7
        del buf7
        triton_poi_fused_native_layer_norm_5[grid(16)](buf13, buf14, buf15,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_6[grid(64)](buf13, buf14, buf15,
            primals_16, primals_17, buf16, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf14
        del buf15
        del primals_17
    # buf16 is the encoder output; the rest are saved for backward.
    return buf16, primals_1, primals_10, primals_16, buf4, reinterpret_tensor(
        buf5, (4, 4), (4, 1), 0), buf6, reinterpret_tensor(buf9, (16, 4), (
        4, 1), 0), reinterpret_tensor(buf11, (16, 4), (4, 1), 0
        ), buf13, primals_14, buf17, primals_12, primals_8, reinterpret_tensor(
        buf2, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1
        ), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0)
class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled dot-product attention."""

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """Attend Q over K and aggregate V.

        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: optional scaling factor for the logits; the paper
                uses 1/sqrt(dim_K)
        Return:
            context tensor after attention
        """
        # Raw similarity scores between queries and keys.
        scores = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            scores = scores * scale
        weights = F.softmax(scores, dim=-1)
        # Weighted sum of the values.
        return torch.matmul(weights, V)
class Multi_Head_Attention(nn.Module):
    """Multi-head self-attention with a residual connection and LayerNorm."""

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        # Project, then fold the heads into the batch dimension.
        Q = self.fc_Q(x).view(batch_size * self.num_head, -1, self.dim_head)
        K = self.fc_K(x).view(batch_size * self.num_head, -1, self.dim_head)
        V = self.fc_V(x).view(batch_size * self.num_head, -1, self.dim_head)
        # Scale logits by 1/sqrt(dim_head), as in the original paper.
        context = self.attention(Q, K, V, K.size(-1) ** -0.5)
        # Merge heads back into the feature dimension and project.
        merged = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.dropout(self.fc(merged))
        # Residual connection followed by layer norm.
        return self.layer_norm(out + x)
class Position_wise_Feed_Forward(nn.Module):
    """Two-layer position-wise feed-forward net with residual + LayerNorm."""

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        # linear -> ReLU -> linear, then dropout.
        y = self.dropout(self.fc2(F.relu(self.fc1(x))))
        # Residual connection followed by layer norm.
        return self.layer_norm(y + x)
class EncoderNew(nn.Module):
    """Transformer encoder block (attention + feed-forward) whose forward
    dispatches to the compiled call() graph instead of the eager modules.

    NOTE(review): the primals_* numbering below (e.g. primals_14 =
    input_0 while primals_8 = fc1.weight) does not obviously line up
    with how call() consumes those tensors — verify against the original
    eager Encoder before trusting this mapping.
    """

    def __init__(self, dim_model, num_head, hidden, dropout):
        super(EncoderNew, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden,
            dropout)

    def forward(self, input_0):
        # Collect every parameter plus the input and run the compiled graph.
        primals_1 = self.attention.fc_Q.weight
        primals_3 = self.attention.fc_Q.bias
        primals_2 = self.attention.fc_K.weight
        primals_5 = self.attention.fc_K.bias
        primals_4 = self.attention.fc_V.weight
        primals_7 = self.attention.fc_V.bias
        primals_6 = self.attention.fc.weight
        primals_9 = self.attention.fc.bias
        primals_10 = self.attention.layer_norm.weight
        primals_11 = self.attention.layer_norm.bias
        primals_8 = self.feed_forward.fc1.weight
        primals_13 = self.feed_forward.fc1.bias
        primals_12 = self.feed_forward.fc2.weight
        primals_15 = self.feed_forward.fc2.bias
        primals_16 = self.feed_forward.layer_norm.weight
        primals_17 = self.feed_forward.layer_norm.bias
        primals_14 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        # call() returns a tuple; index 0 is the encoder output.
        return output[0]
|
NTDXYG/Text-Classify-based-pytorch
|
Encoder
| false
| 8,641
|
[
"Apache-2.0"
] | 20
|
b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f
|
https://github.com/NTDXYG/Text-Classify-based-pytorch/tree/b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f
|
SAB
|
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class MAB(nn.Module):
    """Multihead Attention Block: X attends over Y (Set Transformer style)."""

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        # Optional layer norms / dropouts; identity when disabled.
        self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity()
        self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity()

    def forward(self, X, Y, mask=None):
        heads = self.num_heads
        Q = self.fc_q(X)
        K = self.fc_k(Y)
        V = self.fc_v(Y)
        # Split the feature dim into heads and fold them into the batch dim.
        Q_ = torch.cat(Q.chunk(heads, -1), 0)
        K_ = torch.cat(K.chunk(heads, -1), 0)
        V_ = torch.cat(V.chunk(heads, -1), 0)
        # NOTE: the logits are scaled by sqrt of the full model dim
        # (Q.shape[-1]), not the per-head dim.
        A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1])
        if mask is None:
            A = torch.softmax(A_logits, -1)
        else:
            # Broadcast the mask over queries and heads, then mask logits.
            expanded = torch.stack([mask] * Q.shape[-2], -2)
            expanded = torch.cat([expanded] * heads, 0)
            A_logits.masked_fill_(expanded, -float('inf'))
            A = torch.softmax(A_logits, -1)
            # Rows that are fully masked come out as NaN; zero them.
            A.masked_fill_(torch.isnan(A), 0.0)
        # Head-wise weighted values, merged back into the feature dim.
        attn = torch.cat((A @ V_).chunk(heads, 0), -1)
        O = self.ln1(Q + self.dropout1(attn))
        O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O))))
        return O
class SAB(nn.Module):
    """Set Attention Block: an MAB where the set attends to itself."""

    def __init__(self, dim_X, dim, **kwargs):
        super().__init__()
        # Queries and keys/values share the same input dimension.
        self.mab = MAB(dim_X, dim_X, dim, **kwargs)

    def forward(self, X, mask=None):
        # Self-attention: X plays both the query and key/value roles.
        return self.mab(X, X, mask=mask)
def get_inputs():
    """Sample forward-pass inputs for this module."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Positional and keyword arguments for constructing the module."""
    return [[], dict(dim_X=4, dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Head split: torch.cat(x.chunk(4, -1), 0) for a (..., 4) input —
    # the output batch-quarter (x1 // 4-range) selects which last-dim
    # element (0..3) of the input is gathered.
    # Also pre-scales by 0.7071... = 1/sqrt(2); applied to both the Q and
    # K copies this yields the combined 1/sqrt(4) = 1/2 logit scaling.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x0 = xindex % 16
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # first chunk (batch 0..3)
    tmp5 = tl.load(in_ptr0 + (4 * x0 + 64 * x1), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # second chunk (batch 4..7)
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * (-4 + x1)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13  # third chunk (batch 8..11)
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * (-8 + x1)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12  # fourth chunk (batch 12..15)
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * (-12 + x1)), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tmp23 = 0.7071067811865476
    tmp24 = tmp22 * tmp23
    tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Softmax step 1 over rows of 4: out = exp(x - rowmax(x)).
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # The four entries of row x1, used to compute the row maximum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax step 2: out = exp(x - max) / rowsum, with special handling
    # for rows whose logits (in_ptr0) are all -inf — those rows would be
    # NaN after softmax and are written as 0 instead (matches the eager
    # A.masked_fill_(torch.isnan(A), 0.0) path).
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    # Raw logits of row x1 (to detect an all -inf row).
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    # Numerators from the previous kernel plus the row entries for the sum.
    tmp25 = tl.load(in_ptr1 + x2, xmask)
    tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = float('-inf')
    tmp2 = tmp0 == tmp1
    tmp3 = tmp2 == 0  # entry != -inf
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4 != 0
    tmp7 = tmp6 == tmp1
    tmp8 = tmp7 == 0
    tmp9 = tmp8.to(tl.int64)
    tmp10 = tmp9 != 0
    tmp11 = tmp5 | tmp10
    tmp13 = tmp12 == tmp1
    tmp14 = tmp13 == 0
    tmp15 = tmp14.to(tl.int64)
    tmp16 = tmp15 != 0
    tmp17 = tmp11 | tmp16
    tmp19 = tmp18 == tmp1
    tmp20 = tmp19 == 0
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp21 != 0
    tmp23 = tmp17 | tmp22  # any entry finite?
    tmp24 = tmp23 == 0  # True when the whole row is -inf
    tmp28 = tmp26 + tmp27
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31  # row sum of exp-numerators
    tmp33 = tmp25 / tmp32
    tmp34 = 0.0
    tmp35 = tl.where(tmp24, tmp34, tmp33)
    tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Head split for V: torch.cat(x.chunk(4, -1), 0), identical gather
    # pattern to triton_poi_fused_cat_0 but without the 1/sqrt(2) scale.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x0 = xindex % 16
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # first chunk
    tmp5 = tl.load(in_ptr0 + (4 * x0 + 64 * x1), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # second chunk
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * (-4 + x1)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13  # third chunk
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * (-8 + x1)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12  # fourth chunk
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * (-12 + x1)), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused_add_cat_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Head merge + residual, in place:
    # in_out += cat((A @ V_).chunk(4, 0), -1) — the output column x0
    # picks which batch-quarter of in_ptr0 (offsets 0/64/128/192) is
    # gathered back into the feature dimension.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)  # residual (Q projection)
    tmp1 = x0
    tl.full([1], 0, tl.int64)
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp1 < tmp4  # head 0
    tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp1 >= tmp4
    tmp8 = tl.full([1], 2, tl.int64)
    tmp9 = tmp1 < tmp8
    tmp10 = tmp7 & tmp9  # head 1
    tmp11 = tl.load(in_ptr0 + (64 + x1), tmp10 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp1 >= tmp8
    tmp13 = tl.full([1], 3, tl.int64)
    tmp14 = tmp1 < tmp13
    tmp15 = tmp12 & tmp14  # head 2
    tmp16 = tl.load(in_ptr0 + (128 + x1), tmp15 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp17 = tmp1 >= tmp13  # head 3
    tl.full([1], 4, tl.int64)
    tmp20 = tl.load(in_ptr0 + (192 + x1), tmp17 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp21 = tl.where(tmp15, tmp16, tmp20)
    tmp22 = tl.where(tmp10, tmp11, tmp21)
    tmp23 = tl.where(tmp5, tmp6, tmp22)
    tmp24 = tmp0 + tmp23
    tl.store(in_out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_5(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # out0 = in0 + relu(in1 + bias[col]) — the fc_o + ReLU + residual of
    # the MAB. out1 stores the (relu_out <= 0) mask used by the backward
    # pass for the ReLU.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)  # residual input
    tmp1 = tl.load(in_ptr1 + x2, xmask)  # matmul output (no bias yet)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')  # bias
    tmp3 = tmp1 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)  # ReLU
    tmp6 = tmp0 + tmp5
    tmp7 = 0.0
    tmp8 = tmp5 <= tmp7
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
    # Compiled MAB/SAB forward (mask=None path): Q/K/V projections,
    # per-head split, softmax attention, head merge + residual with Q,
    # then fc_o + ReLU + residual. No ln/dropout parameters are passed,
    # so this corresponds to the Identity configuration.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q, K, V projections as flat (64, 4) addmms over the input.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        # Head split of Q and K with the 1/sqrt(2) pre-scaling baked in.
        buf3 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((16, 4, 1, 4), (16, 4, 4, 1), torch.float32)
        triton_poi_fused_cat_0[grid(256)](buf1, buf4, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        # Attention logits, then two-step softmax (exp of shifted logits,
        # then normalization with all -inf rows zeroed).
        buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_1[grid(1024)](buf5, buf6, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf7 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_2[grid(1024)](buf5, buf6, buf7, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf5
        del buf6
        # Head split of V (reusing buf1's storage), then A @ V_.
        buf8 = reinterpret_tensor(buf1, (16, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf1
        triton_poi_fused_cat_3[grid(256)](buf2, buf8, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf9 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf8, (64, 4, 1), (4, 1, 0), 0), out=buf9)
        # Merge heads back and add the residual Q projection (in buf10).
        buf10 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        triton_poi_fused_add_cat_4[grid(256)](buf10, buf9, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        # fc_o matmul, then fused bias + ReLU + residual; buf13 keeps
        # the (relu <= 0) mask for the backward pass.
        buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0)
        del buf9
        extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_5[grid(256)](buf10,
            buf11, primals_9, buf12, buf13, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf11
        del primals_9
    # buf12 is the block output; the rest are saved for backward.
    return buf12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf7, reinterpret_tensor(buf8, (64, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0
        ), reinterpret_tensor(buf10, (64, 4), (4, 1), 0), buf13, primals_8
class MAB(nn.Module):
    """Multihead Attention Block: X attends over Y (Set Transformer style)."""

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        # Optional layer norms / dropouts; identity when disabled.
        self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity()
        self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity()

    def forward(self, X, Y, mask=None):
        Q, K, V = self.fc_q(X), self.fc_k(Y), self.fc_v(Y)
        # Split the feature dim into heads, folded into the batch dim.
        Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0)
        K_ = torch.cat(K.chunk(self.num_heads, -1), 0)
        V_ = torch.cat(V.chunk(self.num_heads, -1), 0)
        # NOTE: scaled by sqrt of the full model dim, not the head dim.
        A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1])
        if mask is not None:
            # Broadcast the mask over queries and heads before masking.
            mask = torch.stack([mask] * Q.shape[-2], -2)
            mask = torch.cat([mask] * self.num_heads, 0)
            A_logits.masked_fill_(mask, -float('inf'))
            A = torch.softmax(A_logits, -1)
            # Fully-masked rows softmax to NaN; zero them out.
            A.masked_fill_(torch.isnan(A), 0.0)
        else:
            A = torch.softmax(A_logits, -1)
        # Head-wise weighted values, merged back into the feature dim.
        attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1)
        O = self.ln1(Q + self.dropout1(attn))
        O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O))))
        return O
class SABNew(nn.Module):
    """Set Attention Block backed by the compiled call() graph."""

    def __init__(self, dim_X, dim, **kwargs):
        super().__init__()
        self.mab = MAB(dim_X, dim_X, dim, **kwargs)

    def forward(self, input_0):
        # Gather the MAB's linear-layer parameters and run the compiled
        # kernels (no ln/dropout parameters are passed; mask=None path).
        primals_1 = self.mab.fc_q.weight
        primals_2 = self.mab.fc_q.bias
        primals_4 = self.mab.fc_k.weight
        primals_5 = self.mab.fc_k.bias
        primals_6 = self.mab.fc_v.weight
        primals_7 = self.mab.fc_v.bias
        primals_8 = self.mab.fc_o.weight
        primals_9 = self.mab.fc_o.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        # call() returns a tuple; index 0 is the block output.
        return output[0]
|
OpenXAIProject/dac
|
SAB
| false
| 8,642
|
[
"MIT"
] | 17
|
652776e21b56dcb68839363bb077d5c5ea28d81e
|
https://github.com/OpenXAIProject/dac/tree/652776e21b56dcb68839363bb077d5c5ea28d81e
|
GatedLinear
|
import torch
import torch.nn as nn
class GatedLinear(nn.Module):
    """Linear projection to 2*output_size followed by a GLU gate."""

    def __init__(self, input_size, output_size):
        super(GatedLinear, self).__init__()
        # The linear layer produces both halves consumed by the GLU.
        self.linear = nn.Linear(input_size, output_size * 2)
        self.glu = nn.GLU(dim=-1)

    def forward(self, x, y=None, x_mask=None, y_mask=None, rel_embed=None):
        # Extra arguments are accepted for interface compatibility only.
        projected = self.linear(x)
        return self.glu(projected)
def get_inputs():
    """Sample forward-pass inputs for this module."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Positional and keyword arguments for constructing the module."""
    return [[], dict(input_size=4, output_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_glu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused GLU over a last dim of 8 split into two halves of 4:
    # out = a * sigmoid(b), a = row[x0], b = row[4 + x0].
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)  # first half (value)
    tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)  # second half (gate)
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
    # Compiled GatedLinear forward: one addmm for Linear(in, 2*out),
    # then a fused GLU kernel (first half * sigmoid(second half)).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (8, 4), (4, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Flatten the input to (64, 4) and apply bias + x @ W^T.
        buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_glu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    # buf1 is the GLU output; the rest are saved for backward.
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0)
class GatedLinearNew(nn.Module):
    """Compiled GatedLinear: same parameters/submodules, but forward
    routes through the fused call() pipeline instead of the eager glu.
    """

    def __init__(self, input_size, output_size):
        super(GatedLinearNew, self).__init__()
        self.linear = nn.Linear(input_size, output_size * 2)
        self.glu = nn.GLU(dim=-1)

    def forward(self, input_0):
        # Gather the linear layer's parameters and run the compiled graph.
        primals_1 = self.linear.weight
        primals_2 = self.linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        # call() returns a tuple; index 0 is the GLU output.
        return output[0]
|
ParadoxZW/mmnas
|
GatedLinear
| false
| 8,643
|
[
"Apache-2.0"
] | 23
|
186ef8648e71b5fc4433faf80431a0f8bc9261a0
|
https://github.com/ParadoxZW/mmnas/tree/186ef8648e71b5fc4433faf80431a0f8bc9261a0
|
BlurPool2d
|
import torch
import torch.nn as nn
import torch.utils.data
class BlurPool2d(nn.Sequential):
    """Blur Pooling Layer (MaxPool2d replacement)

    See: https://richzhang.github.io/antialiased-cnns/
    Paper: https://arxiv.org/abs/1904.11486
    """
    __constants__ = ['in_features']
    _blur_kernel = torch.tensor([[1 / 16, 2 / 16, 1 / 16], [2 / 16, 4 / 16,
        2 / 16], [1 / 16, 2 / 16, 1 / 16]])

    def __init__(self, in_features):
        """
        Args:
            in_features (int): The number of channels in the input
        """
        super().__init__()
        self.in_features = in_features
        # Dense (stride-1) max pool first; the strided blur conv then
        # does the actual downsampling.
        self.add_module('maxpool', nn.MaxPool2d(2, stride=1))
        conv = nn.Conv2d(in_features, in_features, kernel_size=3,
            padding=1, stride=2, bias=False, groups=in_features)
        # Fixed (non-trainable) binomial blur kernel, one copy per channel.
        conv.weight = torch.nn.Parameter(self._blur_kernel.repeat(
            in_features, 1, 1, 1), requires_grad=False)
        self.add_module('blurpool', conv)

    def forward(self, x):
        # Run the registered modules in order: maxpool -> blurpool.
        return super(BlurPool2d, self).forward(x)

    def extra_repr(self):
        return 'in_features={}'.format(self.in_features)
def get_inputs():
    """Sample forward-pass inputs for this module."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Positional and keyword arguments for constructing the module."""
    return [[], dict(in_features=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Stride-1 2x2 max pool over a 4x4 plane (max of the offsets
    # 0/+1/+4/+5), writing a 3x3 result in a channels-last-style layout
    # (y0 + 4 * x5 + 36 * y1) for the following grouped convolution.
    ynumel = 16
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex % 3
    x3 = xindex // 3
    y4 = yindex
    x5 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    # The 2x2 window: (r, c), (r, c+1), (r+1, c), (r+1, c+1).
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * x3 + 16 * y4), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + x2 + 4 * x3 + 16 * y4), xmask & ymask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x2 + 4 * x3 + 16 * y4), xmask & ymask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (5 + x2 + 4 * x3 + 16 * y4), xmask & ymask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + (y0 + 4 * x5 + 36 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_max_pool2d_with_indices_1(in_ptr0,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout conversion: copies the convolution's channels-last-style
    # output back into a contiguous NCHW tensor (no arithmetic).
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
    # Compiled BlurPool2d forward: fused stride-1 2x2 max pool (written
    # to a channels-last-style layout), grouped 3x3 blur convolution
    # with stride 2, then a copy back to contiguous NCHW.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 1, 3, 3), (9, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 1, 12, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(16, 9)](arg0_1,
            buf0, 16, 9, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del arg0_1
        # Depthwise blur: one 3x3 kernel per channel (groups=4), stride 2.
        buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf1, (4, 4, 2, 2), (16, 1, 8, 4))
        del arg1_1
        del buf0
        buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused_convolution_max_pool2d_with_indices_1[grid(16, 4)](
            buf1, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del buf1
    return buf2,
class BlurPool2dNew(nn.Sequential):
    """Blur Pooling Layer (MaxPool2d replacement)

    See: https://richzhang.github.io/antialiased-cnns/
    Paper: https://arxiv.org/abs/1904.11486

    Compiled variant: forward runs the fused call() pipeline instead of
    iterating the registered submodules.
    """
    __constants__ = ['in_features']
    _blur_kernel = torch.tensor([[1 / 16, 2 / 16, 1 / 16], [2 / 16, 4 / 16,
        2 / 16], [1 / 16, 2 / 16, 1 / 16]])

    def __init__(self, in_features):
        """
        Args:
            in_features (int): The number of channels in the input
        """
        super().__init__()
        self.in_features = in_features
        self.add_module('maxpool', nn.MaxPool2d(2, stride=1))
        blurpool = nn.Conv2d(in_features, in_features, kernel_size=3,
            padding=1, stride=2, bias=False, groups=in_features)
        # Fixed (non-trainable) binomial blur kernel, one copy per channel.
        blurpool.weight = torch.nn.Parameter(self._blur_kernel.repeat(
            in_features, 1, 1, 1), requires_grad=False)
        self.add_module('blurpool', blurpool)

    def extra_repr(self):
        return 'in_features={}'.format(self.in_features)

    def forward(self, input_0):
        # Only the conv weight and the input feed the compiled graph.
        arg1_1 = self.blurpool.weight
        arg0_1 = input_0
        output = call([arg0_1, arg1_1])
        return output[0]
|
Noodles-321/RegistrationEval
|
BlurPool2d
| false
| 8,644
|
[
"MIT"
] | 38
|
3631d3d5bd65acf980fcfed803fa6125970f3e88
|
https://github.com/Noodles-321/RegistrationEval/tree/3631d3d5bd65acf980fcfed803fa6125970f3e88
|
VarifocalLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # Map the reduction string through PyTorch's canonical enum:
    # 0 = none, 1 = mean, 2 = sum.
    mode = F._Reduction.get_enum(reduction)
    if mode == 0:
        return loss
    if mode == 1:
        return loss.mean()
    if mode == 2:
        return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.

    Raises:
        ValueError: if ``avg_factor`` is combined with ``reduction='sum'``.
    """
    if weight is not None:
        loss = loss * weight
    # Without an explicit average factor, fall back to plain reduction.
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # Average over the caller-supplied factor, not the element count.
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0,
    iou_weighted=True, reduction='mean', avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Focal-style BCE-with-logits where positives are (optionally) weighted by
    their IoU target and negatives by ``alpha * |sigmoid(pred) - target|**gamma``.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C).
        weight (torch.Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): Balance factor for the negative part of
            Varifocal Loss. Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive example with the iou target. Defaults to True.
        reduction (str, optional): "none", "mean" or "sum". Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    assert pred.size() == target.size()
    target = target.type_as(pred)
    pos = (target > 0.0).float()
    neg = (target <= 0.0).float()
    # Negatives are down-weighted by how confidently wrong the prediction is.
    neg_weight = alpha * (pred.sigmoid() - target).abs().pow(gamma) * neg
    pos_weight = target * pos if iou_weighted else pos
    focal_weight = pos_weight + neg_weight
    loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    loss = loss * focal_weight
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
class VarifocalLoss(nn.Module):
    """Module wrapper around :func:`varifocal_loss`.

    `Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ — stores the loss
    hyper-parameters and applies an overall ``loss_weight`` scaling.
    """

    def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
        iou_weighted=True, reduction='mean', loss_weight=1.0):
        """
        Args:
            use_sigmoid (bool, optional): Whether the prediction is used for
                sigmoid or softmax. Defaults to True (only sigmoid supported).
            alpha (float, optional): Balance factor for the negative part of
                Varifocal Loss. Defaults to 0.75.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            iou_weighted (bool, optional): Whether to weight the loss of the
                positive examples with the iou target. Defaults to True.
            reduction (str, optional): "none", "mean" or "sum". Defaults to
                'mean'.
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super().__init__()
        assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
        reduction_override=None):
        """Compute the (optionally re-reduced) varifocal loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Per-prediction loss weight.
            avg_factor (int, optional): Average factor used to average the loss.
            reduction_override (str, optional): Overrides the configured
                reduction; one of None, "none", "mean", "sum".

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        return self.loss_weight * varifocal_loss(pred, target, weight,
            alpha=self.alpha, gamma=self.gamma,
            iou_weighted=self.iou_weighted, reduction=reduction,
            avg_factor=avg_factor)
def get_inputs():
    """Return an example (pred, target) pair for exercising the loss."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Return (args, kwargs) for constructing the module with defaults."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# TorchInductor-generated fused kernel: computes the full varifocal loss
# (BCE-with-logits * focal weight, then mean over all 256 elements) in a
# single reduction. in_ptr0 = target, in_ptr1 = pred logits; the scalar
# result is written to in_out_ptr0. alpha=0.75 and gamma=2 are baked in.
@triton.jit
def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    # Numerically stable BCE-with-logits:
    # (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    # Focal weight: t * (t > 0) + 0.75 * |sigmoid(x) - t|^2 * (t <= 0)
    tmp13 = tmp0 > tmp5
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp0 * tmp14
    tmp16 = tl.sigmoid(tmp3)
    tmp17 = tmp16 - tmp0
    tmp18 = tl_math.abs(tmp17)
    tmp19 = tmp18 * tmp18
    tmp20 = 0.75
    tmp21 = tmp19 * tmp20
    tmp22 = tmp0 <= tmp5
    tmp23 = tmp22.to(tl.float32)
    tmp24 = tmp21 * tmp23
    tmp25 = tmp15 + tmp24
    tmp26 = tmp12 * tmp25
    # Mean reduction over the 256 elements, times loss_weight (1.0).
    tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
    tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
    tmp30 = 256.0
    tmp31 = tmp29 / tmp30
    tmp32 = tmp31 * tmp1
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp32, None)
def call(args):
    # Inductor entry point for the compiled varifocal loss: validates the two
    # (4, 4, 4, 4) inputs, allocates a scalar output buffer on CUDA device 0,
    # and launches the fused reduction kernel over all 256 elements.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-dim scalar accumulator for the reduced loss.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0[
            grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # get_enum validates the string and returns 0/1/2 for none/mean/sum.
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 2:
        return loss.sum()
    if reduction_enum == 1:
        return loss.mean()
    if reduction_enum == 0:
        return loss
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Avarage factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    weighted = loss if weight is None else loss * weight
    if avg_factor is None:
        # No custom denominator: defer to the standard reduction.
        return reduce_loss(weighted, reduction)
    if reduction == 'mean':
        # Custom denominator instead of the element count.
        return weighted.sum() / avg_factor
    if reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return weighted
def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0,
    iou_weighted=True, reduction='mean', avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    BCE-with-logits modulated by a focal weight: negatives are scaled by
    ``alpha * |sigmoid(pred) - target|**gamma`` and positives by the IoU
    target (or 1 when ``iou_weighted`` is False).

    Args:
        pred (torch.Tensor): The prediction with shape (N, C).
        target (torch.Tensor): IoU-aware classification score targets with
            the same shape as ``pred``.
        weight (torch.Tensor, optional): Per-prediction loss weight.
        alpha (float, optional): Balance factor for the negative part.
            Defaults to 0.75.
        gamma (float, optional): Modulating-factor exponent. Defaults to 2.0.
        iou_weighted (bool, optional): Weight positives by the iou target.
            Defaults to True.
        reduction (str, optional): "none", "mean" or "sum".
        avg_factor (int, optional): Average factor for the reduction.
    """
    assert pred.size() == target.size()
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    is_pos = (target > 0.0).float()
    modulating = alpha * (pred_sigmoid - target).abs().pow(gamma)
    focal_weight = modulating * (target <= 0.0).float()
    if iou_weighted:
        focal_weight = focal_weight + target * is_pos
    else:
        focal_weight = focal_weight + is_pos
    bce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    return weight_reduce_loss(bce * focal_weight, weight, reduction, avg_factor)
class VarifocalLossNew(nn.Module):
    """Inductor-compiled variant of VarifocalLoss.

    Hyper-parameters are stored as in the eager module, but forward routes
    through the generated CUDA `call` entry point, which hard-codes the
    default hyper-parameters (alpha=0.75, gamma=2, mean reduction) — the
    stored attributes are not consulted by the compiled path.
    """

    def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
        iou_weighted=True, reduction='mean', loss_weight=1.0):
        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
        Args:
            use_sigmoid (bool, optional): Whether the prediction is
                used for sigmoid or softmax. Defaults to True.
            alpha (float, optional): A balance factor for the negative part of
                Varifocal Loss, which is different from the alpha of Focal
                Loss. Defaults to 0.75.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            iou_weighted (bool, optional): Whether to weight the loss of the
                positive examples with the iou target. Defaults to True.
            reduction (str, optional): The method used to reduce the loss into
                a scalar. Defaults to 'mean'. Options are "none", "mean" and
                "sum".
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(VarifocalLossNew, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        # input_0 = pred logits, input_1 = target; `call` expects
        # (4, 4, 4, 4) CUDA tensors and returns a 1-tuple with the scalar loss.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
NEUdeep/TileDetection
|
VarifocalLoss
| false
| 8,645
|
[
"Apache-2.0"
] | 41
|
f453ac868de195a7859b9bf07c813e46eb35d2d0
|
https://github.com/NEUdeep/TileDetection/tree/f453ac868de195a7859b9bf07c813e46eb35d2d0
|
ConvNet64
|
import torch
import torch.nn as nn
def get_activation(s_act):
    """Return the activation module named by *s_act* (None for 'linear').

    Raises:
        ValueError: If *s_act* is not a recognized activation name.
    """
    # Factories are zero-arg callables so modules are built only on demand.
    factories = {
        'relu': lambda: nn.ReLU(inplace=True),
        'sigmoid': nn.Sigmoid,
        'softplus': nn.Softplus,
        'linear': lambda: None,
        'tanh': nn.Tanh,
        'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
        'softmax': lambda: nn.Softmax(dim=1),
        'spherical': lambda: SphericalActivation(),
    }
    if s_act not in factories:
        raise ValueError(f'Unexpected activation: {s_act}')
    return factories[s_act]()
class SphericalActivation(nn.Module):
    """Project each input onto the unit L2 sphere along dim 1."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        norm = x.norm(p=2, dim=1, keepdim=True)
        return x / norm
class ConvNet64(nn.Module):
    """ConvNet architecture for CelebA64 following Ghosh et al., 2019

    Four stride-2 5x5 convolutions (nh*4 -> nh*32 channels) followed by a
    1x1 "fc" convolution to out_chan, with an activation after each conv and
    an optional output activation.
    """

    def __init__(self, in_chan=3, out_chan=64, nh=32, out_activation=
        'linear', activation='relu', num_groups=None, use_bn=False):
        super().__init__()
        self.conv1 = nn.Conv2d(in_chan, nh * 4, kernel_size=5, bias=True,
            stride=2)
        self.conv2 = nn.Conv2d(nh * 4, nh * 8, kernel_size=5, bias=True,
            stride=2)
        self.conv3 = nn.Conv2d(nh * 8, nh * 16, kernel_size=5, bias=True,
            stride=2)
        self.conv4 = nn.Conv2d(nh * 16, nh * 32, kernel_size=5, bias=True,
            stride=2)
        # 1x1 conv acting as the final fully-connected projection.
        self.fc1 = nn.Conv2d(nh * 32, out_chan, kernel_size=1, bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        self.num_groups = num_groups
        self.use_bn = use_bn
        # Assemble conv -> (optional norm) -> activation stages.
        # NOTE(review): norm layers are only inserted when num_groups is not
        # None, so use_bn=True alone has no effect on the built network —
        # confirm whether that is intentional.
        layers = []
        layers.append(self.conv1)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 4))
        layers.append(get_activation(activation))
        layers.append(self.conv2)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 8))
        layers.append(get_activation(activation))
        layers.append(self.conv3)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 16))
        layers.append(get_activation(activation))
        layers.append(self.conv4)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 32))
        layers.append(get_activation(activation))
        layers.append(self.fc1)
        out_activation = get_activation(out_activation)
        if out_activation is not None:
            layers.append(out_activation)
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)

    def get_norm_layer(self, num_channels):
        # GroupNorm wins over BatchNorm when both are configured; returns
        # None (implicitly) when neither is — callers guard on num_groups.
        if self.num_groups is not None:
            return nn.GroupNorm(num_groups=self.num_groups, num_channels=
                num_channels)
        elif self.use_bn:
            return nn.BatchNorm2d(num_channels)
def get_inputs():
    """Return a sample batch of four 3x64x64 images."""
    return [torch.rand(4, 3, 64, 64)]
def get_init_inputs():
    """Return (args, kwargs) for constructing the network with defaults."""
    return [list(), dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Generated layout-transform kernel: repacks the (128, 3, 5, 5) conv1 weight
# from contiguous NCHW strides into a channels-last-style layout
# (75, 1, 15, 3) expected by the convolution call.
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 384
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
        ='evict_last')
    # Scatter with channel (y0) innermost: out[y0 + 3*x2 + 75*y1].
    tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)
# Generated layout-transform kernel: repacks the (4, 3, 64, 64) input image
# into a channels-last-style layout (stride 1 on the channel axis).
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
# Generated layout-transform kernel: repacks the (256, 128, 5, 5) conv2
# weight into a channels-last-style layout (input channel innermost).
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
# Generated layout-transform kernel: repacks the (512, 256, 5, 5) conv3
# weight into a channels-last-style layout. The y offset folds in a second
# grid axis because the element count exceeds a single launch dimension.
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 6400 * y1), tmp0, xmask)
# Generated layout-transform kernel: repacks the (1024, 512, 5, 5) conv4
# weight into a channels-last-style layout (same scheme as triton_poi_fused_3).
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 12800 * y1), tmp0, xmask)
# Fused bias-add + ReLU applied in place to the conv1 output
# (128 output channels; x0 indexes the channel for the bias lookup).
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ReLU: max(0, conv + bias).
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
# Fused bias-add + ReLU applied in place to the conv2 output
# (256 channels; masked because 173056 is not a multiple of the block size).
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 173056
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
# Fused bias-add + ReLU applied in place to the conv3 output (512 channels).
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
# Fused bias-add + ReLU applied in place to the conv4 output (1024 channels).
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
# In-place bias-add for the final 1x1 "fc" convolution output
# (64 channels, 256 elements total; no activation — output is 'linear').
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    # Inductor entry point for the compiled ConvNet64 forward pass.
    # args = [conv1.w, conv1.b, input, conv2.w, conv2.b, conv3.w, conv3.b,
    #         conv4.w, conv4.b, fc1.w, fc1.b]; shapes/strides are validated,
    # weights and input are repacked to channels-last-style layouts, then
    # each convolution is dispatched to cuDNN/extern kernels with the fused
    # bias+ReLU Triton kernels applied in between.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (128, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (128,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (256, 128, 5, 5), (3200, 25, 5, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (512, 256, 5, 5), (6400, 25, 5, 1))
    assert_size_stride(primals_7, (512,), (1,))
    assert_size_stride(primals_8, (1024, 512, 5, 5), (12800, 25, 5, 1))
    assert_size_stride(primals_9, (1024,), (1,))
    assert_size_stride(primals_10, (64, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_11, (64,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- layout transforms: weights and input to channels-last-style ---
        buf0 = empty_strided_cuda((128, 3, 5, 5), (75, 1, 15, 3), torch.float32
            )
        get_raw_stream(0)
        triton_poi_fused_0[grid(384, 25)](primals_1, buf0, 384, 25, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
            .float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128),
            torch.float32)
        triton_poi_fused_2[grid(32768, 25)](primals_4, buf2, 32768, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((512, 256, 5, 5), (6400, 1, 1280, 256),
            torch.float32)
        triton_poi_fused_3[grid(131072, 25)](primals_6, buf3, 131072, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((1024, 512, 5, 5), (12800, 1, 2560, 512),
            torch.float32)
        triton_poi_fused_4[grid(524288, 25)](primals_8, buf4, 524288, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_8
        # --- conv1 + bias + ReLU ---
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 128, 30, 30), (115200, 1, 3840, 128))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_5[grid(460800)](buf6, primals_2,
            460800, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        # --- conv2 + bias + ReLU ---
        buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 256, 13, 13), (43264, 1, 3328, 256))
        buf8 = buf7
        del buf7
        triton_poi_fused_convolution_relu_6[grid(173056)](buf8, primals_5,
            173056, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        # --- conv3 + bias + ReLU ---
        buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 512, 5, 5), (12800, 1, 2560, 512))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_7[grid(51200)](buf10, primals_7,
            51200, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_7
        # --- conv4 + bias + ReLU ---
        buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 1024, 1, 1), (1024, 1, 1024, 1024))
        buf12 = buf11
        del buf11
        triton_poi_fused_convolution_relu_8[grid(4096)](buf12, primals_9,
            4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        # --- fc1 (1x1 conv) + bias, no activation ---
        buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 64, 1, 1), (64, 1, 64, 64))
        buf14 = reinterpret_tensor(buf13, (4, 64, 1, 1), (64, 1, 1, 1), 0)
        del buf13
        triton_poi_fused_convolution_9[grid(256)](buf14, primals_11, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
    # Output first; the remaining buffers are saved for autograd backward.
    return (buf14, buf0, buf1, buf2, buf3, buf4, primals_10, buf6, buf8,
        buf10, buf12)
def get_activation(s_act):
    """Map an activation name to its torch module (None for 'linear').

    Raises:
        ValueError: If *s_act* is not a recognized activation name.
    """
    if s_act == 'linear':
        return None
    if s_act == 'spherical':
        return SphericalActivation()
    # Parameter-free activations share one construction path.
    plain = {'sigmoid': nn.Sigmoid, 'softplus': nn.Softplus, 'tanh': nn.Tanh}
    if s_act in plain:
        return plain[s_act]()
    if s_act == 'relu':
        return nn.ReLU(inplace=True)
    if s_act == 'leakyrelu':
        return nn.LeakyReLU(0.2, inplace=True)
    if s_act == 'softmax':
        return nn.Softmax(dim=1)
    raise ValueError(f'Unexpected activation: {s_act}')
class SphericalActivation(nn.Module):
    """L2-normalize the input over dim 1 (projection onto the unit sphere)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x.div(x.norm(p=2, dim=1, keepdim=True))
class ConvNet64New(nn.Module):
    """ConvNet architecture for CelebA64 following Ghosh et al., 2019

    Inductor-compiled variant: the constructor mirrors ConvNet64 (so
    state_dicts stay compatible), but forward feeds the parameters directly
    into the generated CUDA `call` instead of running self.net. The compiled
    path hard-codes the default configuration (ReLU, linear output, no norm
    layers); the num_groups/use_bn/activation arguments only affect the
    unused self.net.
    """

    def __init__(self, in_chan=3, out_chan=64, nh=32, out_activation=
        'linear', activation='relu', num_groups=None, use_bn=False):
        super().__init__()
        self.conv1 = nn.Conv2d(in_chan, nh * 4, kernel_size=5, bias=True,
            stride=2)
        self.conv2 = nn.Conv2d(nh * 4, nh * 8, kernel_size=5, bias=True,
            stride=2)
        self.conv3 = nn.Conv2d(nh * 8, nh * 16, kernel_size=5, bias=True,
            stride=2)
        self.conv4 = nn.Conv2d(nh * 16, nh * 32, kernel_size=5, bias=True,
            stride=2)
        # 1x1 conv acting as the final fully-connected projection.
        self.fc1 = nn.Conv2d(nh * 32, out_chan, kernel_size=1, bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        self.num_groups = num_groups
        self.use_bn = use_bn
        # NOTE(review): norm layers are only inserted when num_groups is not
        # None; use_bn=True alone has no effect (same as the eager ConvNet64).
        layers = []
        layers.append(self.conv1)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 4))
        layers.append(get_activation(activation))
        layers.append(self.conv2)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 8))
        layers.append(get_activation(activation))
        layers.append(self.conv3)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 16))
        layers.append(get_activation(activation))
        layers.append(self.conv4)
        if num_groups is not None:
            layers.append(self.get_norm_layer(num_channels=nh * 32))
        layers.append(get_activation(activation))
        layers.append(self.fc1)
        out_activation = get_activation(out_activation)
        if out_activation is not None:
            layers.append(out_activation)
        self.net = nn.Sequential(*layers)

    def get_norm_layer(self, num_channels):
        # GroupNorm takes precedence over BatchNorm when both are configured.
        if self.num_groups is not None:
            return nn.GroupNorm(num_groups=self.num_groups, num_channels=
                num_channels)
        elif self.use_bn:
            return nn.BatchNorm2d(num_channels)

    def forward(self, input_0):
        # Gather all parameters in the order the generated `call` expects.
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.fc1.weight
        primals_11 = self.fc1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
|
Neural-Diffusion-Research/normalized-autoencoders
|
ConvNet64
| false
| 8,646
|
[
"MIT"
] | 30
|
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
https://github.com/Neural-Diffusion-Research/normalized-autoencoders/tree/0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
MAB
|
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class MAB(nn.Module):
    """Multihead Attention Block: queries from X attend over keys/values
    from Y, followed by a residual feed-forward step (Set Transformer style).

    Args:
        dim_X: feature size of the query input X.
        dim_Y: feature size of the key/value input Y.
        dim: projection size (must be divisible by num_heads).
        num_heads: number of attention heads.
        ln: apply LayerNorm after each residual step when True.
        p: dropout probability (None disables dropout).
    """

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        # NOTE: construction order matters for reproducible RNG-seeded init.
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity()
        self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity()

    def forward(self, X, Y, mask=None):
        Q = self.fc_q(X)
        K = self.fc_k(Y)
        V = self.fc_v(Y)
        # Split the feature dim into heads and stack them on the batch dim.
        Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0)
        K_ = torch.cat(K.chunk(self.num_heads, -1), 0)
        V_ = torch.cat(V.chunk(self.num_heads, -1), 0)
        scale = math.sqrt(Q.shape[-1])
        A_logits = (Q_ @ K_.transpose(-2, -1)) / scale
        if mask is None:
            A = torch.softmax(A_logits, -1)
        else:
            # Broadcast the mask over the query positions and heads, then
            # zero any NaN rows produced by fully-masked attention.
            mask = torch.stack([mask] * Q.shape[-2], -2)
            mask = torch.cat([mask] * self.num_heads, 0)
            A_logits.masked_fill_(mask, -float('inf'))
            A = torch.softmax(A_logits, -1)
            A.masked_fill_(torch.isnan(A), 0.0)
        # Merge the heads back into the feature dimension.
        attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1)
        O = self.ln1(Q + self.dropout1(attn))
        return self.ln2(O + self.dropout2(F.relu(self.fc_o(O))))
def get_inputs():
    """Return example (X, Y) tensors for exercising the attention block."""
    return [torch.rand(4, 4, 4, 4) for _ in range(2)]
def get_init_inputs():
    """Return (args, kwargs) for constructing MAB with small test dims."""
    return [[], dict(dim_X=4, dim_Y=4, dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Generated head-split kernel: gathers one of four interleaved feature
# slices per head (the torch.cat(Q.chunk(4, -1), 0) pattern) and multiplies
# by 0.7071067811865476 — presumably part of the 1/sqrt(d) attention
# scaling folded into this gather; confirm against the (truncated) `call`.
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x0 = xindex % 16
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    # Four branches — one per head-slice of the stacked batch dimension.
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x0 + 64 * x1), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * (-4 + x1)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * (-8 + x1)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * (-12 + x1)), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tmp23 = 0.7071067811865476
    tmp24 = tmp22 * tmp23
    tl.store(out_ptr0 + x2, tmp24, xmask)
# Generated softmax numerator kernel: subtracts the row max over the last
# dim (4 elements per row) and exponentiates; normalization happens in the
# next kernel.
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
# Generated softmax normalization kernel: divides each exp value (in_ptr1)
# by its row sum, and zeroes rows whose raw logits (in_ptr0) were all -inf
# (i.e. fully masked rows, where softmax would otherwise be NaN).
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp25 = tl.load(in_ptr1 + x2, xmask)
    tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    # "Any logit finite?" test: tmp23 is true if at least one of the four
    # row entries is not -inf; tmp24 selects fully-masked rows.
    tmp1 = float('-inf')
    tmp2 = tmp0 == tmp1
    tmp3 = tmp2 == 0
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4 != 0
    tmp7 = tmp6 == tmp1
    tmp8 = tmp7 == 0
    tmp9 = tmp8.to(tl.int64)
    tmp10 = tmp9 != 0
    tmp11 = tmp5 | tmp10
    tmp13 = tmp12 == tmp1
    tmp14 = tmp13 == 0
    tmp15 = tmp14.to(tl.int64)
    tmp16 = tmp15 != 0
    tmp17 = tmp11 | tmp16
    tmp19 = tmp18 == tmp1
    tmp20 = tmp19 == 0
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp21 != 0
    tmp23 = tmp17 | tmp22
    tmp24 = tmp23 == 0
    tmp28 = tmp26 + tmp27
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31
    tmp33 = tmp25 / tmp32
    tmp34 = 0.0
    tmp35 = tl.where(tmp24, tmp34, tmp33)
    tl.store(out_ptr0 + x2, tmp35, xmask)
# Generated head-split kernel: same interleaved gather as
# triton_poi_fused_cat_0 but without the scaling factor — presumably the
# K/V path where no 1/sqrt(d) factor is applied; confirm against `call`.
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x0 = xindex % 16
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x0 + 64 * x1), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * (-4 + x1)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * (-8 + x1)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * (-12 + x1)), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused_add_cat_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Inductor-generated fused add + torch.cat kernel.

    In place, adds to in_out_ptr0 a value gathered from one of four
    64-element-strided slabs of in_ptr0 (offsets 0/64/128/192), chosen by
    x0 = xindex % 4 — the cat along the last (size-4) axis.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = x0
    # dead constant kept from codegen (lower bound of the first cat segment)
    tl.full([1], 0, tl.int64)
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp1 < tmp4
    tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp1 >= tmp4
    tmp8 = tl.full([1], 2, tl.int64)
    tmp9 = tmp1 < tmp8
    tmp10 = tmp7 & tmp9
    tmp11 = tl.load(in_ptr0 + (64 + x1), tmp10 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp1 >= tmp8
    tmp13 = tl.full([1], 3, tl.int64)
    tmp14 = tmp1 < tmp13
    tmp15 = tmp12 & tmp14
    tmp16 = tl.load(in_ptr0 + (128 + x1), tmp15 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp17 = tmp1 >= tmp13
    tl.full([1], 4, tl.int64)
    tmp20 = tl.load(in_ptr0 + (192 + x1), tmp17 & xmask, eviction_policy=
        'evict_last', other=0.0)
    # select the segment value, then accumulate into the in/out buffer
    tmp21 = tl.where(tmp15, tmp16, tmp20)
    tmp22 = tl.where(tmp10, tmp11, tmp21)
    tmp23 = tl.where(tmp5, tmp6, tmp22)
    tmp24 = tmp0 + tmp23
    tl.store(in_out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_5(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused residual-add + ReLU kernel.

    Computes out0 = in0 + relu(in1 + bias) elementwise (bias broadcast over
    xindex % 4), and out1 = (relu result <= 0), the mask saved for the
    ReLU backward pass.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = tmp0 + tmp5
    tmp7 = 0.0
    tmp8 = tmp5 <= tmp7
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
    """Inductor-compiled forward graph for the MAB attention block.

    args: the ten primals (fc_q/fc_k/fc_v/fc_o weights+biases and the two
    input tensors) in the fixed order asserted below. Returns the block
    output plus intermediates saved for backward. The input list is cleared
    so the caller's references are released early.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q, K, V linear projections (bias + matmul fused via addmm).
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, reinterpret_tensor(primals_6, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf2)
        del primals_7
        del primals_8
        # Split Q and K into per-head layouts (fused cat kernels).
        buf3 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((16, 4, 1, 4), (16, 4, 4, 1), torch.float32)
        triton_poi_fused_cat_0[grid(256)](buf1, buf4, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        # Attention scores Q @ K^T, then the two-pass softmax kernels.
        buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_1[grid(1024)](buf5, buf6, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf7 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_2[grid(1024)](buf5, buf6, buf7, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf5
        del buf6
        # Per-head V layout; buf1/buf2 storage is reused for new shapes.
        buf8 = reinterpret_tensor(buf1, (16, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf1
        triton_poi_fused_cat_3[grid(256)](buf2, buf8, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf9 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0)
        del buf2
        # Attention output: softmax(QK^T) @ V.
        extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf8, (64, 4, 1), (4, 1, 0), 0), out=buf9)
        buf10 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        # Re-merge heads and add to the query projection (residual).
        triton_poi_fused_add_cat_4[grid(256)](buf10, buf9, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0)
        del buf9
        # Output projection fc_o, then residual + ReLU (+ backward mask).
        extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_5[grid(256)](buf10,
            buf11, primals_10, buf12, buf13, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf11
        del primals_10
    return buf12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
        ), buf7, reinterpret_tensor(buf8, (64, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0
        ), reinterpret_tensor(buf10, (64, 4), (4, 1), 0), buf13, primals_9
class MABNew(nn.Module):
    """Multihead attention block (MAB) whose forward pass is executed by the
    Inductor-compiled `call` graph instead of eager PyTorch ops."""

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        # Factories keep the conditional wiring in one place each.
        make_norm = (lambda: nn.LayerNorm(dim)) if ln else nn.Identity
        self.ln1 = make_norm()
        self.ln2 = make_norm()
        make_drop = nn.Identity if p is None else (lambda: nn.Dropout(p=p))
        self.dropout1 = make_drop()
        self.dropout2 = make_drop()

    def forward(self, input_0, input_1):
        # Argument order is fixed by the compiled graph: primals_1..10.
        params = [
            self.fc_q.weight, self.fc_q.bias,  # primals_1, primals_2
            input_0,                           # primals_3
            self.fc_k.weight, self.fc_k.bias,  # primals_4, primals_5
            input_1,                           # primals_6
            self.fc_v.weight, self.fc_v.bias,  # primals_7, primals_8
            self.fc_o.weight, self.fc_o.bias,  # primals_9, primals_10
        ]
        return call(params)[0]
|
OpenXAIProject/dac
|
MAB
| false
| 8,647
|
[
"MIT"
] | 17
|
652776e21b56dcb68839363bb077d5c5ea28d81e
|
https://github.com/OpenXAIProject/dac/tree/652776e21b56dcb68839363bb077d5c5ea28d81e
|
RMSPE
|
import torch
import torch.nn as nn
class RMSPE(nn.Module):
    """Root Mean Square Percentage Error loss.

    RMSPE = sqrt(mean((|pred - target| / (|target| + eps))^2)); `eps`
    guards against division by zero for zero-valued targets.
    """

    def __init__(self, eps: 'float'=1e-08):
        super().__init__()
        self.eps = eps

    def forward(self, pred: 'torch.Tensor', target: 'torch.Tensor'):
        denom = target.abs() + self.eps
        rel_err = (pred - target).abs() / denom
        return rel_err.square().mean().sqrt()
def get_inputs():
    """Sample forward inputs: two random (4, 4, 4, 4) tensors."""
    shape = (4, 4, 4, 4)
    return [torch.rand(shape), torch.rand(shape)]


def get_init_inputs():
    """Constructor args: no positionals, no keywords."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    """Inductor-generated RMSPE reduction: one program reduces all 256
    elements to sqrt(mean((|a - b| / (|b| + 1e-8))^2)) and stores the scalar
    at in_out_ptr0[0].
    """
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    # dead codegen constants (unused masks for the degenerate x-dimension)
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl_math.abs(tmp1)
    tmp5 = 1e-08
    tmp6 = tmp4 + tmp5
    tmp7 = tmp3 / tmp6
    tmp8 = tmp7 * tmp7
    tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
    tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
    # mean over the fixed 256-element domain, then sqrt
    tmp12 = 256.0
    tmp13 = tmp11 / tmp12
    tmp14 = libdevice.sqrt(tmp13)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
    """Inductor-compiled RMSPE forward: reduces the two (4,4,4,4) inputs to
    a scalar loss with a single fused Triton kernel. Clears `args` to drop
    the caller's references early.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-d output buffer; the kernel writes the scalar in place.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_add_div_mean_pow_sqrt_sub_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class RMSPENew(nn.Module):
    """RMSPE loss backed by the fused Triton kernel in `call`.

    NOTE: the compiled graph hard-codes eps=1e-8; `self.eps` is kept for
    interface compatibility only.
    """

    def __init__(self, eps: 'float'=1e-08):
        super().__init__()
        self.eps = eps

    def forward(self, input_0, input_1):
        # `call` consumes its argument list, so hand it a fresh one.
        result, = call([input_0, input_1])
        return result
|
Phimos/SIGSPATIAL-2021-GISCUP-3rd-Solution
|
RMSPE
| false
| 8,648
|
[
"MIT"
] | 11
|
79fcf9941c28cdb2eb38a3654e1514a1d998a41c
|
https://github.com/Phimos/SIGSPATIAL-2021-GISCUP-3rd-Solution/tree/79fcf9941c28cdb2eb38a3654e1514a1d998a41c
|
AdaIN
|
import torch
import torch.nn as nn
import torch.utils.data
class AdaIN(nn.Module):
    """Adaptive Instance Normalization: instance-normalizes x, then applies
    a scale (1 + gamma) and shift beta predicted from the style code s."""

    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        self.fc = nn.Linear(style_dim, num_features * 2)

    def forward(self, x, s):
        # fc output is (B, 2*C); reshape for spatial broadcast, then split
        # into the gamma/beta halves along the channel axis.
        style = self.fc(s).view(s.size(0), -1, 1, 1)
        gamma, beta = style.chunk(2, dim=1)
        return (1 + gamma) * self.norm(x) + beta
def get_inputs():
    """Sample forward inputs: a (4,4,4,4) content tensor and a (4,4) style."""
    return [torch.rand((4, 4, 4, 4)), torch.rand((4, 4))]


def get_init_inputs():
    """Constructor args: keyword style/feature dims of 4 each."""
    return [[], {'style_dim': 4, 'num_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    """Inductor-generated fused AdaIN kernel: per-(sample, channel) instance
    norm over 16 spatial elements, plus the style affine transform.

    Writes out1 = (1 + gamma) * normalized + beta where gamma/beta come from
    the first/second half of the fc output (in_ptr1) plus its bias (in_ptr2).
    Also stores the per-group mean (out_ptr0) and rsqrt(var + 1e-5)
    (in_out_ptr0), the stats saved for backward.
    """
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4
    x3 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    # gamma half of the fc output (+ bias), then beta half at offset 4
    tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
        'evict_last')
    tmp31 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    # mean over the 16 spatial positions
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 16.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-05
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.rsqrt(tmp20)
    tmp24 = tmp22 + tmp23
    tmp25 = 1.0
    tmp26 = tmp24 + tmp25
    tmp27 = tmp0 - tmp10
    tmp28 = tmp27 * tmp21
    tmp29 = tmp26 * tmp28
    tmp32 = tmp30 + tmp31
    tmp33 = tmp29 + tmp32
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr1 + (r1 + 16 * x0), tmp33, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
    """Inductor-compiled AdaIN forward.

    args = [fc.weight, fc.bias, style (4,4), content (4,4,4,4)]. Runs the
    style linear projection via mm, then a single fused kernel for instance
    norm + affine. Returns (output, style, content, mean, rstd) — the
    trailing tensors are saved for backward.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (8, 4), (4, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # style projection: s @ fc.weight^T (bias is added inside the kernel)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
            (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
            )
        buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
        del buf2
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
            primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del buf0
        del primals_2
    return buf5, primals_3, primals_4, buf1, buf4
class AdaINNew(nn.Module):
    """Adaptive Instance Normalization executed through the compiled
    `call` graph (fused instance-norm + style affine)."""

    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        self.fc = nn.Linear(style_dim, num_features * 2)

    def forward(self, input_0, input_1):
        # call() expects (fc.weight, fc.bias, style, content).
        weight, bias = self.fc.weight, self.fc.bias
        return call([weight, bias, input_1, input_0])[0]
|
Noodles-321/RegistrationEval
|
AdaIN
| false
| 8,649
|
[
"MIT"
] | 38
|
3631d3d5bd65acf980fcfed803fa6125970f3e88
|
https://github.com/Noodles-321/RegistrationEval/tree/3631d3d5bd65acf980fcfed803fa6125970f3e88
|
Model
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    """Two-layer MLP applied to the concatenation of state and goal."""

    def __init__(self, num_inputs, num_outputs, hidden_size=256):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, num_outputs)

    def forward(self, state, goal):
        joint = torch.cat([state, goal], 1)
        hidden = F.relu(self.linear1(joint))
        return self.linear2(hidden)
def get_inputs():
    """Sample forward inputs: two random (4, 4, 4, 4) tensors."""
    shape = (4, 4, 4, 4)
    return [torch.rand(shape), torch.rand(shape)]


def get_init_inputs():
    """Constructor args: 4 inputs and 4 outputs, default hidden size."""
    return [[], {'num_inputs': 4, 'num_outputs': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Inductor-generated torch.cat along dim 1: the first four x1-slices of
    each output come from in_ptr0, the next four from in_ptr1."""
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    # dead constant kept from codegen (lower bound of first cat segment)
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused bias + ReLU: in place, computes
    relu(x + bias) (bias broadcast over xindex % 256) and also stores the
    (result <= 0) mask used by the ReLU backward pass."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
    """Inductor-compiled forward graph for the 2-layer MLP Model.

    args = [state, goal, linear1.weight, linear1.bias, linear2.weight,
    linear2.bias]. cat -> mm -> fused bias+ReLU -> addmm, returning the
    output plus intermediates saved for backward.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (256, 4), (4, 1))
    assert_size_stride(primals_4, (256,), (1,))
    assert_size_stride(primals_5, (4, 256), (256, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # torch.cat([state, goal], 1)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        # linear1 matmul; its bias is folded into the fused ReLU kernel below
        buf1 = empty_strided_cuda((128, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 256), (1, 4), 0), out=buf1)
        del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 8, 4, 256), (8192, 1024, 256, 1), 0
            )
        del buf1
        buf4 = empty_strided_cuda((4, 8, 4, 256), (8192, 1024, 256, 1),
            torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(32768)](buf2,
            primals_4, buf4, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_4
        # linear2 (bias + matmul fused via addmm)
        buf3 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (128, 256),
            (256, 1), 0), reinterpret_tensor(primals_5, (256, 4), (1, 256),
            0), alpha=1, beta=1, out=buf3)
        del primals_6
    return reinterpret_tensor(buf3, (4, 8, 4, 4), (128, 16, 4, 1), 0
        ), reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(
        buf2, (128, 256), (256, 1), 0), primals_5, buf4
class ModelNew(nn.Module):
    """Two-layer MLP over cat(state, goal), executed by the compiled
    `call` graph instead of eager ops."""

    def __init__(self, num_inputs, num_outputs, hidden_size=256):
        super(ModelNew, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, num_outputs)

    def forward(self, input_0, input_1):
        # Argument order is fixed by the compiled graph: primals_1..6.
        params = [
            input_0, input_1,
            self.linear1.weight, self.linear1.bias,
            self.linear2.weight, self.linear2.bias,
        ]
        return call(params)[0]
|
PacktPublishing/Hands-On-Reinforcement-Learning-for-Games
|
Model
| false
| 8,650
|
[
"MIT"
] | 41
|
045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
|
https://github.com/PacktPublishing/Hands-On-Reinforcement-Learning-for-Games/tree/045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
|
ResBlk
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def normalize(x, eps=1e-10):
    """L2-normalize x along dim 1 (keepdim), with eps guarding rsqrt(0)."""
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)
    return x * torch.rsqrt(sq_norm + eps)
class ResBlk(nn.Module):
    """Pre-activation residual block with optional instance normalization,
    downsampling, and a learned 1x1 shortcut when channel counts differ.
    Output is (shortcut + residual) / sqrt(2)."""

    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize=
        False, downsample=False):
        super().__init__()
        self.actv = actv
        self.normalize = normalize
        self.downsample = downsample
        # A 1x1 conv is only needed when the channel count changes.
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out)

    def _build_weights(self, dim_in, dim_out):
        # Register submodules in a fixed order (matters for state_dict keys).
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        if self.normalize:
            self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
            self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        # Identity path, with optional channel projection and pooling.
        out = self.conv1x1(x) if self.learned_sc else x
        return F.avg_pool2d(out, 2) if self.downsample else out

    def _residual(self, x):
        # norm -> actv -> conv (-> pool) -> norm -> actv -> conv
        out = self.norm1(x) if self.normalize else x
        out = self.conv1(self.actv(out))
        if self.downsample:
            out = F.avg_pool2d(out, 2)
        if self.normalize:
            out = self.norm2(out)
        return self.conv2(self.actv(out))

    def forward(self, x):
        combined = self._shortcut(x) + self._residual(x)
        # Unit-variance scaling of the sum of two branches.
        return combined / math.sqrt(2)
def get_inputs():
    """Sample forward input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand((4, 4, 4, 4))]


def get_init_inputs():
    """Constructor args: 4 input channels, 4 output channels."""
    return [[], {'dim_in': 4, 'dim_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Inductor-generated elementwise LeakyReLU with negative slope 0.2:
    out = x if x > 0 else 0.2 * x."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused conv-bias + LeakyReLU(0.2): adds the
    per-channel bias (in_ptr1, indexed by xindex // 16 % 4), stores the
    (x > 0) mask to out_ptr0 and the activated value to out_ptr1."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_2(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated residual merge: in place, computes
    (shortcut + (conv_out + bias)) * (1/sqrt(2)), with the per-channel bias
    indexed by xindex // 16 % 4."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_out_ptr0 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    # multiply by 1/sqrt(2) instead of dividing by sqrt(2)
    tmp5 = 0.7071067811865475
    tmp6 = tmp4 * tmp5
    tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
    """Inductor-compiled ResBlk forward (normalize=False, downsample=False,
    learned_sc=False path).

    args = [x, conv1.weight, conv1.bias, conv2.weight, conv2.bias].
    LeakyReLU -> conv1 -> (bias + LeakyReLU fused) -> conv2 ->
    (residual add + bias + /sqrt(2) fused). Returns the output plus
    intermediates saved for backward.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # pre-activation on the input
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        # conv1 without bias; bias is added in the fused kernel below
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
            primals_3, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_3
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
        buf5 = buf4
        del buf4
        # merge: (x + conv2_out + bias) / sqrt(2), written in place into buf5
        triton_poi_fused_add_convolution_div_2[grid(256)](buf5, primals_1,
            primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_5
    return buf5, primals_2, primals_4, buf0, buf2, buf3
def normalize(x, eps=1e-10):
    """L2-normalize x along dim 1 (keepdim), with eps guarding rsqrt(0)."""
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)
    return x * torch.rsqrt(sq_norm + eps)
class ResBlkNew(nn.Module):
    """Pre-activation residual block whose forward pass runs through the
    Inductor-compiled `call` graph. The `_shortcut`/`_residual` helpers are
    kept for interface parity with the eager implementation."""

    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize=
        False, downsample=False):
        super().__init__()
        self.actv = actv
        self.normalize = normalize
        self.downsample = downsample
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out)

    def _build_weights(self, dim_in, dim_out):
        # Register submodules in a fixed order (matters for state_dict keys).
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        if self.normalize:
            self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
            self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        out = self.conv1x1(x) if self.learned_sc else x
        return F.avg_pool2d(out, 2) if self.downsample else out

    def _residual(self, x):
        out = self.norm1(x) if self.normalize else x
        out = self.conv1(self.actv(out))
        if self.downsample:
            out = F.avg_pool2d(out, 2)
        if self.normalize:
            out = self.norm2(out)
        return self.conv2(self.actv(out))

    def forward(self, input_0):
        # Argument order is fixed by the compiled graph: primals_1..5.
        params = [
            input_0,
            self.conv1.weight, self.conv1.bias,
            self.conv2.weight, self.conv2.bias,
        ]
        return call(params)[0]
|
Noodles-321/RegistrationEval
|
ResBlk
| false
| 8,651
|
[
"MIT"
] | 38
|
3631d3d5bd65acf980fcfed803fa6125970f3e88
|
https://github.com/Noodles-321/RegistrationEval/tree/3631d3d5bd65acf980fcfed803fa6125970f3e88
|
SimpleModel
|
import torch
import torch.nn as nn
import torch.onnx
import torch.nn.functional as F
class SimpleModel(nn.Module):
    """Small CNN: three unpadded 3x3 conv+ReLU stages, global average
    pooling, then a two-layer classification head (128 -> 256 -> 10)."""

    def __init__(self):
        super(SimpleModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.fc = nn.Linear(128, 256)
        self.classifier = nn.Linear(256, 10)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(conv(x))
        # Global average pool to (B, 128), then the dense head.
        pooled = F.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
        return self.classifier(self.fc(pooled))
def get_inputs():
    """Sample forward input: one random (4, 3, 64, 64) image batch."""
    return [torch.rand((4, 3, 64, 64))]


def get_init_inputs():
    """Constructor args: no positionals, no keywords."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Inductor-generated layout transform for the conv1 weight: permutes
    in[x2 + 9*y3] to out[y0 + 3*x2 + 27*y1] (channel dim moved innermost)."""
    ynumel = 96
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Inductor-generated layout transform for the input batch: permutes
    in[x2 + 4096*y3] to out[y0 + 3*x2 + 12288*y1] (channels-last layout)."""
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Inductor-generated layout transform for the conv2 weight: permutes
    in[x2 + 9*y3] to out[y0 + 32*x2 + 288*y1] (32 input channels innermost)."""
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Inductor-generated layout transform for the conv3 weight: permutes
    in[x2 + 9*y3] to out[y0 + 64*x2 + 576*y1] (64 input channels innermost)."""
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Inductor-generated fused conv1-bias + ReLU: in place, computes
    relu(x + bias) with the 32-channel bias broadcast over xindex % 32."""
    xnumel = 492032
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Inductor-generated fused conv2-bias + ReLU: in place, computes
    relu(x + bias) with the 64-channel bias broadcast over xindex % 64.
    No bounds mask — the launch grid covers the domain exactly."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_red_fused_convolution_mean_relu_6(in_ptr0, in_ptr1, out_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    """Inductor-generated first stage of the global average pool: per
    channel, sums relu(conv3_out + bias) over 125-element chunks of the
    3364 (= 58*58) spatial positions; a second kernel finishes the mean."""
    xnumel = 13824
    rnumel = 125
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x1 = xindex // 128 % 27
    x0 = xindex % 128
    x2 = xindex // 3456
    _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    x4 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r3 = rindex
        tmp0 = r3 + 125 * x1
        # guard: the last chunk runs past the 3364 spatial elements
        tmp1 = tl.full([1, 1], 3364, tl.int32)
        tmp2 = tmp0 < tmp1
        tmp3 = tl.load(in_ptr0 + (x0 + 128 * ((r3 + 125 * x1) % 3364) +
            430592 * x2), rmask & tmp2 & xmask, eviction_policy=
            'evict_last', other=0.0)
        tmp4 = tl.load(in_ptr1 + tl.broadcast_to(x0, [XBLOCK, RBLOCK]),
            rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0)
        tmp5 = tmp3 + tmp4
        tmp6 = tl.full([1, 1], 0, tl.int32)
        tmp7 = triton_helpers.maximum(tmp6, tmp5)
        tmp8 = tl.full(tmp7.shape, 0, tmp7.dtype)
        tmp9 = tl.where(tmp2, tmp7, tmp8)
        tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
        tmp12 = _tmp11 + tmp10
        _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
    tmp11 = tl.sum(_tmp11, 1)[:, None]
    tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_per_fused_convolution_mean_relu_7(in_out_ptr0, in_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    """Inductor-generated second stage of the global average pool: sums the
    27 partial sums per channel produced by the previous kernel and divides
    by 3364 (= 58*58 spatial positions) to finish the mean."""
    xnumel = 512
    rnumel = 27
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r2 = rindex
    x0 = xindex % 128
    x1 = xindex // 128
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 3456 * x1), rmask & xmask,
        other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(rmask & xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 3364.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated ReLU backward mask: stores
    (relu(x + bias) <= 0) with the 128-channel bias broadcast over
    xindex % 128; only the boolean mask is kept."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
    """Inductor-generated forward for SimpleModel.

    Consumes [conv1.w, conv1.b, input, conv2.w, conv2.b, conv3.w, conv3.b,
    fc.w, fc.b, classifier.w, classifier.b], runs three conv+ReLU stages, a
    global mean pool, and two addmm (linear) layers on CUDA device 0.
    Returns the logits first, followed by saved tensors for backward.
    NOTE(review): generated code — statement order and `del`s manage GPU
    memory reuse; do not reorder.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (256, 128), (128, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (10, 256), (256, 1))
    assert_size_stride(primals_11, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Repack weights and input into channels-last-style strided layouts.
        buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
            .float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
            float32)
        triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
            .float32)
        triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        # conv1 + bias + ReLU (bias/ReLU fused into kernel 4, in place).
        buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 62, 62), (123008, 1, 1984, 32))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_4[grid(492032)](buf5, primals_2,
            492032, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        # conv2 + bias + ReLU.
        buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 64, 60, 60), (230400, 1, 3840, 64))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_5[grid(921600)](buf7, primals_5,
            921600, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        # conv3, then two-stage bias+ReLU+mean reduction (global avg pool).
        buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 128, 58, 58), (430592, 1, 7424, 128))
        buf9 = empty_strided_cuda((4, 128, 1, 1, 27), (3456, 1, 13824,
            13824, 128), torch.float32)
        triton_red_fused_convolution_mean_relu_6[grid(13824)](buf8,
            primals_7, buf9, 13824, 125, XBLOCK=64, RBLOCK=8, num_warps=4,
            num_stages=1)
        buf10 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512),
            torch.float32)
        buf11 = buf10
        del buf10
        triton_per_fused_convolution_mean_relu_7[grid(512)](buf11, buf9,
            512, 27, XBLOCK=16, num_warps=4, num_stages=1)
        del buf9
        # fc and classifier layers as fused addmm (bias + matmul).
        buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf11, (4, 128),
            (128, 1), 0), reinterpret_tensor(primals_8, (128, 256), (1, 128
            ), 0), alpha=1, beta=1, out=buf12)
        del primals_9
        buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(
            primals_10, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf13)
        del primals_11
        # Boolean ReLU mask for conv3, saved for the backward pass.
        buf14 = empty_strided_cuda((4, 128, 58, 58), (430592, 1, 7424, 128),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_8[grid(1722368)](
            buf8, primals_7, buf14, 1722368, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf8
        del primals_7
    return buf13, buf0, buf1, buf2, buf3, buf5, buf7, reinterpret_tensor(buf11,
        (4, 128), (128, 1), 0), buf12, primals_10, primals_8, buf14
class SimpleModelNew(nn.Module):
    """CNN with three convs, a global-mean pool, and two linear layers.

    Mirrors SimpleModel but routes inference through the Inductor-generated
    `call` wrapper; parameters are passed positionally in the order the
    generated code expects.
    """

    def __init__(self):
        super(SimpleModelNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.fc = nn.Linear(128, 256)
        self.classifier = nn.Linear(256, 10)

    def forward(self, input_0):
        # Argument order is fixed by the generated `call`:
        # conv1.w, conv1.b, input, conv2.w, conv2.b, conv3.w, conv3.b,
        # fc.w, fc.b, classifier.w, classifier.b
        packed = [
            self.conv1.weight, self.conv1.bias, input_0,
            self.conv2.weight, self.conv2.bias,
            self.conv3.weight, self.conv3.bias,
            self.fc.weight, self.fc.bias,
            self.classifier.weight, self.classifier.bias,
        ]
        return call(packed)[0]
|
PanJinquan/pytorch-base-trainer
|
SimpleModel
| false
| 8,652
|
[
"MIT"
] | 11
|
37799c948f72b2f9d3771ff469e06cdbff4a1d07
|
https://github.com/PanJinquan/pytorch-base-trainer/tree/37799c948f72b2f9d3771ff469e06cdbff4a1d07
|
DiceBCELoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DiceBCELoss(nn.Module):
    """Sum of soft-Dice loss and mean binary cross-entropy.

    Logits are passed through a sigmoid, both tensors are flattened, and the
    two terms are computed over all elements jointly.
    """

    def __init__(self, weight=None, size_average=True):
        # `weight` / `size_average` are accepted for API compatibility only.
        super(DiceBCELoss, self).__init__()

    def forward(self, inputs, targets, smooth=1):
        probs = torch.sigmoid(inputs).view(-1)
        flat_targets = targets.view(-1)
        # Soft-Dice term with additive smoothing.
        overlap = (probs * flat_targets).sum()
        denom = probs.sum() + flat_targets.sum() + smooth
        dice_loss = 1 - (2.0 * overlap + smooth) / denom
        # BCE over probabilities (mean reduction).
        bce = F.binary_cross_entropy(probs, flat_targets, reduction='mean')
        return bce + dice_loss
def get_inputs():
    """Random (logits, targets) pair for exercising DiceBCELoss.forward."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]


def get_init_inputs():
    """Constructor arguments: no positional args, no keyword args."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    # Fused DiceBCE loss over all 256 elements in a single program:
    # BCE(sigmoid(logits), targets, mean) + (1 - dice). in_ptr0 holds the
    # targets, in_ptr1 the raw logits (see the caller's argument order).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # target
    tmp3 = tl.load(in_ptr1 + r0, None)  # logit
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = tl.sigmoid(tmp3)
    # BCE terms, with log values clamped at -100 (matches F.binary_cross_entropy).
    tmp5 = -tmp4
    tmp6 = libdevice.log1p(tmp5)  # log(1 - p)
    tmp7 = -100.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp2 * tmp8
    tmp10 = tl_math.log(tmp4)  # log(p)
    tmp11 = triton_helpers.maximum(tmp10, tmp7)
    tmp12 = tmp0 * tmp11
    tmp13 = tmp9 - tmp12
    tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    # Dice reductions: intersection, sum(p), sum(target).
    tmp17 = tmp4 * tmp0
    tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
    tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
    tmp21 = tl.broadcast_to(tmp4, [RBLOCK])
    tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
    tmp24 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
    tmp27 = 256.0
    tmp28 = tmp16 / tmp27  # mean BCE
    tmp29 = 2.0
    tmp30 = tmp20 * tmp29
    tmp31 = tmp30 + tmp1  # 2*intersection + smooth (smooth=1 baked in)
    tmp32 = tmp23 + tmp26
    tmp33 = tmp32 + tmp1
    tmp34 = tmp31 / tmp33
    tmp35 = tmp1 - tmp34  # dice loss
    tmp36 = tmp28 + tmp35
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp36, None)
def call(args):
    """Inductor-generated DiceBCELoss forward: one fused reduction kernel.

    args = [logits, targets], both (4, 4, 4, 4) float32 CUDA tensors.
    Returns a 1-tuple holding the scalar loss.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf0
        del buf0
        get_raw_stream(0)
        # Kernel takes (out, targets, logits) — note the swapped order.
        triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0[grid(1)](
            buf4, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf4,
class DiceBCELossNew(nn.Module):
    """Triton-backed DiceBCELoss; forward delegates to the generated `call`."""

    def __init__(self, weight=None, size_average=True):
        # `weight` / `size_average` are accepted for API compatibility only.
        super(DiceBCELossNew, self).__init__()

    def forward(self, input_0, input_1):
        result = call([input_0, input_1])
        return result[0]
|
ProfessorHuang/2D-UNet-Pytorch
|
DiceBCELoss
| false
| 8,653
|
[
"MIT"
] | 11
|
b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
https://github.com/ProfessorHuang/2D-UNet-Pytorch/tree/b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
ContrastiveLoss
|
import torch
import torch.nn.functional as F
class ContrastiveLoss(torch.nn.Module):
    """Contrastive loss (Hadsell, Chopra, LeCun 2006).

    Similar pairs (label 0) are penalized by squared distance; dissimilar
    pairs (label 1) by the squared hinge max(margin - distance, 0).
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    Modified from: https://hackernoon.com/facial-similarity-with-siamese-networks-in-pytorch-9642aa9db2f7
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        dist = F.pairwise_distance(output1, output2)
        similar_term = (1 - label) * dist.pow(2)
        hinge = torch.clamp(self.margin - dist, min=0.0)
        dissimilar_term = label * hinge.pow(2)
        return torch.mean(similar_term + dissimilar_term)
def get_inputs():
    """Random (output1, output2, label) triple for ContrastiveLoss.forward."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(3)]


def get_init_inputs():
    """Constructor arguments: no positional args, no keyword args."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Pairwise Euclidean distance over the last dimension (size 4), fully
    # unrolled: sqrt(sum((a - b + 1e-6)^2)). The 1e-6 matches
    # F.pairwise_distance's eps term.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Load the four components of each vector from both inputs.
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Accumulate (diff + eps)^2 for each component.
    tmp2 = tmp0 - tmp1
    tmp3 = 1e-06
    tmp4 = tmp2 + tmp3
    tmp5 = tmp4 * tmp4
    tmp8 = tmp6 - tmp7
    tmp9 = tmp8 + tmp3
    tmp10 = tmp9 * tmp9
    tmp11 = tmp5 + tmp10
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 + tmp3
    tmp16 = tmp15 * tmp15
    tmp17 = tmp11 + tmp16
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 + tmp3
    tmp22 = tmp21 * tmp21
    tmp23 = tmp17 + tmp22
    tmp24 = libdevice.sqrt(tmp23)
    tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    # Contrastive loss reduction: mean over 256 elements of
    # (1 - label) * d^2 + label * clamp(margin - d, 0)^2, with the margin
    # value 2.0 baked in by the compiler. in_ptr0 = labels, in_ptr1 = the
    # 64 distances (broadcast over the leading dims via r0 = r2 % 64).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex
    r0 = rindex % 64
    tmp0 = tl.load(in_ptr0 + r2, None)  # label
    tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')  # distance
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 * tmp4  # similar-pair term
    tmp6 = 2.0  # margin
    tmp7 = tmp6 - tmp3
    tmp8 = 0.0
    tmp9 = triton_helpers.maximum(tmp7, tmp8)  # clamp(margin - d, min=0)
    tmp10 = tmp9 * tmp9
    tmp11 = tmp0 * tmp10  # dissimilar-pair term
    tmp12 = tmp5 + tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp16 = 256.0
    tmp17 = tmp15 / tmp16  # mean
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
    """Inductor-generated ContrastiveLoss forward.

    args = [output1, output2, label], each (4, 4, 4, 4) float32 CUDA.
    Stage 1 computes 64 pairwise distances; stage 2 reduces to the scalar
    loss. Returns a 1-tuple holding the loss.
    """
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
            arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
        del arg2_1
        del buf0
    return buf2,
class ContrastiveLossNew(torch.nn.Module):
    """Triton-backed contrastive loss (Hadsell, Chopra, LeCun 2006).

    Forward delegates to the Inductor-generated `call`; the margin used by
    the kernels is fixed at compile time.
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    Modified from: https://hackernoon.com/facial-similarity-with-siamese-networks-in-pytorch-9642aa9db2f7
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLossNew, self).__init__()
        self.margin = margin

    def forward(self, input_0, input_1, input_2):
        result = call([input_0, input_1, input_2])
        return result[0]
|
QTIM-Lab/SiameseChange
|
ContrastiveLoss
| false
| 8,654
|
[
"MIT"
] | 14
|
a58fe2a93487b3e164f1d7e0b27f5a3321bc2672
|
https://github.com/QTIM-Lab/SiameseChange/tree/a58fe2a93487b3e164f1d7e0b27f5a3321bc2672
|
SEConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class SEConv2d(nn.Module):
    """2D convolution whose weight is detached in forward.

    The weight is a Parameter (so it appears in state_dict / optimizers),
    but `forward` runs the convolution on a detached copy, so no gradient
    flows into it through this module's output.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=False, size_splits=64,
        threshold=0.005, sign_threshold=0.5, distribution='uniform'):
        # size_splits / threshold / sign_threshold / distribution are kept
        # for API compatibility; they are not used here.
        super(SEConv2d, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        weight_init = torch.randn(self.out_channels, self.in_channels,
            kernel_size, kernel_size)
        self.weight = torch.nn.Parameter(nn.init.normal_(weight_init))

    def forward(self, input):
        # Detach so the convolution does not backpropagate into the weight.
        frozen_weight = self.weight.detach()
        return F.conv2d(input, frozen_weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
def get_inputs():
    """Random input batch for SEConv2d.forward."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor arguments: keyword args only."""
    kwargs = {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout repack for the convolution: copies a (4, 4, 4, 4) NCHW tensor
    # into the channels-last-style strided buffer (64, 1, 16, 4) that the
    # extern convolution expects. Used for both the input and the weight.
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex          # flattened spatial position (h*w)
    y3 = yindex          # flattened (batch, channel)
    y0 = yindex % 4      # channel
    y1 = yindex // 4     # batch
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
    tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
def call(args):
    """Inductor-generated SEConv2d forward.

    args = [weight, input], both (4, 4, 4, 4) float32 CUDA. Repacks both
    into channels-last layout and runs the extern convolution (no bias).
    Returns a 1-tuple with the (4, 4, 1, 1) output.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, 16, 16,
            XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
        triton_poi_fused_convolution_0[grid(16, 16)](arg0_1, buf1, 16, 16,
            XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del arg0_1
        buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 4, 4))
        del buf0
        del buf1
    return buf2,
class SEConv2dNew(nn.Module):
    """Triton-backed SEConv2d; forward delegates to the generated `call`."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=False, size_splits=64,
        threshold=0.005, sign_threshold=0.5, distribution='uniform'):
        # size_splits / threshold / sign_threshold / distribution are kept
        # for API compatibility; they are not used here.
        super(SEConv2dNew, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        weight_init = torch.randn(self.out_channels, self.in_channels,
            kernel_size, kernel_size)
        self.weight = torch.nn.Parameter(nn.init.normal_(weight_init))

    def forward(self, input_0):
        # Generated `call` expects [weight, input].
        result = call([self.weight, input_0])
        return result[0]
|
PannenetsF/TQT
|
SEConv2d
| false
| 8,655
|
[
"BSD-3-Clause"
] | 14
|
3c3125327d00efe6318b28cb1d0a199b734c2c7b
|
https://github.com/PannenetsF/TQT/tree/3c3125327d00efe6318b28cb1d0a199b734c2c7b
|
ReconstructionCriterion
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ReconstructionCriterion(nn.Module):
    """
    Criterion for -log p(x|z): either a binary cross-entropy form (on raw
    logits) or a Gaussian/MSE form (on sigmoid probabilities), both
    normalized by the batch size.
    """

    def __init__(self, x_sigma=1, bce_reconstruction=True):
        super(ReconstructionCriterion, self).__init__()
        self.x_sigma = x_sigma
        self.bce_reconstruction = bce_reconstruction

    def forward(self, x, x_reconstructed):
        batch_size = x.size(0)
        if self.bce_reconstruction:
            total = F.binary_cross_entropy_with_logits(x_reconstructed, x,
                reduction='sum')
            return total / batch_size
        # Gaussian log-likelihood form: MSE scaled by 2 * B * sigma^2.
        scale = 2 * batch_size * self.x_sigma ** 2
        return F.mse_loss(torch.sigmoid(x_reconstructed), x,
            reduction='sum') / scale
def get_inputs():
    """Random (x, x_reconstructed) pair for ReconstructionCriterion.forward."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]


def get_init_inputs():
    """Constructor arguments: no positional args, no keyword args."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    # Fused BCE-with-logits, summed over all 256 elements and divided by the
    # batch size 4 (the 0.25 factor). Uses the numerically stable
    # min(0, z) - log1p(exp(-|z|)) formulation of log(sigmoid(z)).
    # in_ptr0 = targets x, in_ptr1 = logits x_reconstructed.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # target
    tmp3 = tl.load(in_ptr1 + r0, None)  # logit
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10  # log(sigmoid(logit)), stable
    tmp12 = tmp4 - tmp11  # per-element BCE
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp16 = 0.25  # 1 / batch_size
    tmp17 = tmp15 * tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
    """Inductor-generated ReconstructionCriterion forward (BCE branch).

    args = [x (targets), x_reconstructed (logits)], both (4, 4, 4, 4)
    float32 CUDA. Returns a 1-tuple with the scalar loss.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_div_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class ReconstructionCriterionNew(nn.Module):
    """
    Triton-backed criterion for -log p(x|z); the generated `call` implements
    the binary cross-entropy form summed and divided by the batch size.
    """

    def __init__(self, x_sigma=1, bce_reconstruction=True):
        super(ReconstructionCriterionNew, self).__init__()
        self.x_sigma = x_sigma
        self.bce_reconstruction = bce_reconstruction

    def forward(self, input_0, input_1):
        result = call([input_0, input_1])
        return result[0]
|
PaperCodeSubmission/ICML2020-697
|
ReconstructionCriterion
| false
| 8,656
|
[
"MIT"
] | 12
|
00f7732c236b9c6234e76a47dfebe5de314d5c01
|
https://github.com/PaperCodeSubmission/ICML2020-697/tree/00f7732c236b9c6234e76a47dfebe5de314d5c01
|
KLDiscCriterion
|
import torch
import torch.nn as nn
class KLDiscCriterion(nn.Module):
    """
    calculate
    sum (j=1,...,K) D_KL[q(c_j|x)||p(c_j|x)]

    `disc_log_pre` holds log-probabilities; `disc_gt` holds probabilities
    (offset by 1e-4 before the log for numerical safety). The result is
    normalized by the batch size.
    """

    def __init__(self):
        super(KLDiscCriterion, self).__init__()

    def forward(self, disc_log_pre, disc_gt, qp_order=True):
        batch_size = disc_log_pre.size(0)
        disc_log_gt = torch.log(disc_gt + 0.0001)
        if qp_order:
            # KL(q || p) with q = exp(disc_log_pre).
            pointwise = torch.exp(disc_log_pre) * (disc_log_pre - disc_log_gt)
        else:
            # KL(p || q) with p = disc_gt.
            pointwise = disc_gt * (disc_log_gt - disc_log_pre)
        return torch.sum(pointwise) / batch_size
def get_inputs():
    """Random (disc_log_pre, disc_gt) pair for KLDiscCriterion.forward."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]


def get_init_inputs():
    """Constructor arguments: no positional args, no keyword args."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_log_mul_sub_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    # KLDiscCriterion (qp_order=True branch) over all 256 elements:
    # sum(exp(log_pre) * (log_pre - log(gt + 1e-4))) / batch_size(=4).
    # in_ptr0 = disc_log_pre, in_ptr1 = disc_gt.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # log q
    tmp2 = tl.load(in_ptr1 + r0, None)  # p (ground-truth probability)
    tmp1 = tl_math.exp(tmp0)            # q
    tmp3 = 0.0001
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.log(tmp4)            # log(p + eps)
    tmp6 = tmp0 - tmp5
    tmp7 = tmp1 * tmp6                  # q * (log q - log p)
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 0.25  # 1 / batch_size
    tmp12 = tmp10 * tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
    """Inductor-generated KLDiscCriterion forward (qp_order=True branch).

    args = [disc_log_pre, disc_gt], both (4, 4, 4, 4) float32 CUDA.
    Returns a 1-tuple with the scalar KL value.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_exp_log_mul_sub_sum_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class KLDiscCriterionNew(nn.Module):
    """
    calculate
    sum (j=1,...,K) D_KL[q(c_j|x)||p(c_j|x)]

    Triton-backed variant: delegates to the generated `call`, which
    implements the qp_order=True branch of KLDiscCriterion.
    """

    def __init__(self):
        super(KLDiscCriterionNew, self).__init__()

    def forward(self, input_0, input_1):
        result = call([input_0, input_1])
        return result[0]
|
PaperCodeSubmission/ICML2020-697
|
KLDiscCriterion
| false
| 8,657
|
[
"MIT"
] | 12
|
00f7732c236b9c6234e76a47dfebe5de314d5c01
|
https://github.com/PaperCodeSubmission/ICML2020-697/tree/00f7732c236b9c6234e76a47dfebe5de314d5c01
|
M1Criterion
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class M1Criterion(nn.Module):
    """VAE M1 losses: reconstruction term and Gaussian KL term.

    Returns (reconstruct_loss, kl_loss), both normalized by batch size.
    The reconstruction term is BCE-with-logits or a scaled MSE depending on
    `bce_reconstruction`; the KL is the closed form against N(0, I).
    """

    def __init__(self, x_sigma=1, bce_reconstruction=True):
        super(M1Criterion, self).__init__()
        self.x_sigma = x_sigma
        self.bce_reconstruction = bce_reconstruction

    def forward(self, x, x_reconstructed, M1_mean, M1_log_sigma):
        batch_size = x.size(0)
        if self.bce_reconstruction:
            reconstruct_loss = F.binary_cross_entropy_with_logits(
                x_reconstructed, x, reduction='sum') / batch_size
        else:
            reconstruct_loss = F.mse_loss(torch.sigmoid(x_reconstructed), x,
                reduction='sum') / (2 * batch_size * self.x_sigma ** 2)
        # KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(mu^2 + sigma^2 - log sigma^2 - 1)
        mean_sq = M1_mean * M1_mean
        log_var = 2 * M1_log_sigma
        var = torch.exp(log_var)
        kl_loss = 0.5 * torch.sum(mean_sq + var - log_var - 1.0) / batch_size
        return reconstruct_loss, kl_loss
def get_inputs():
    """Random (x, x_reconstructed, M1_mean, M1_log_sigma) for M1Criterion."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(4)]


def get_init_inputs():
    """Constructor arguments: no positional args, no keyword args."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    # M1 reconstruction term: BCE-with-logits summed over 256 elements and
    # divided by batch size 4 (the 0.25 factor). Stable log-sigmoid via
    # min(0, z) - log1p(exp(-|z|)). in_ptr0 = targets x, in_ptr1 = logits.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # target
    tmp3 = tl.load(in_ptr1 + r0, None)  # logit
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10  # log(sigmoid(logit)), stable
    tmp12 = tmp4 - tmp11  # per-element BCE
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp16 = 0.25  # 1 / batch_size
    tmp17 = tmp15 * tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
@triton.jit
def triton_per_fused_add_div_exp_mul_sub_sum_1(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    # M1 Gaussian KL term over 256 elements:
    # 0.5 * sum(mu^2 + exp(2*log_sigma) - 2*log_sigma - 1) / batch_size(=4).
    # in_ptr0 = M1_mean, in_ptr1 = M1_log_sigma.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # mu
    tmp2 = tl.load(in_ptr1 + r0, None)  # log sigma
    tmp1 = tmp0 * tmp0                  # mu^2
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3                  # log sigma^2
    tmp5 = tl_math.exp(tmp4)            # sigma^2
    tmp6 = tmp1 + tmp5
    tmp7 = tmp6 - tmp4
    tmp8 = 1.0
    tmp9 = tmp7 - tmp8
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 0.5
    tmp14 = tmp12 * tmp13
    tmp15 = 0.25  # 1 / batch_size
    tmp16 = tmp14 * tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
    """Inductor-generated M1Criterion forward (BCE branch).

    args = [x, x_reconstructed, M1_mean, M1_log_sigma], each (4, 4, 4, 4)
    float32 CUDA. Returns (reconstruct_loss, kl_loss) as scalars.
    """
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_div_0[grid(1)](buf2,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf1
        del buf1
        triton_per_fused_add_div_exp_mul_sub_sum_1[grid(1)](buf3, arg2_1,
            arg3_1, 1, 256, num_warps=2, num_stages=1)
        del arg2_1
        del arg3_1
    return buf2, buf3
class M1CriterionNew(nn.Module):
    """Triton-backed M1 VAE criterion.

    Delegates to the generated `call` (BCE reconstruction branch) and
    returns (reconstruct_loss, kl_loss).
    """

    def __init__(self, x_sigma=1, bce_reconstruction=True):
        super(M1CriterionNew, self).__init__()
        self.x_sigma = x_sigma
        self.bce_reconstruction = bce_reconstruction

    def forward(self, input_0, input_1, input_2, input_3):
        losses = call([input_0, input_1, input_2, input_3])
        return losses[0], losses[1]
|
PaperCodeSubmission/ICML2020-697
|
M1Criterion
| false
| 8,658
|
[
"MIT"
] | 12
|
00f7732c236b9c6234e76a47dfebe5de314d5c01
|
https://github.com/PaperCodeSubmission/ICML2020-697/tree/00f7732c236b9c6234e76a47dfebe5de314d5c01
|
ada_mask
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
    """Two 3x3 same-padding convs with a ReLU in between.

    NOTE(review): despite the name there is no residual addition, and the
    ker_size/stri/pad arguments are ignored (both convs are fixed 3/1/1).
    """

    def __init__(self, in_channel, out_channel, ker_size, stri, pad):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, out_channel, 3, 1, 1)
        self.conv2 = nn.Conv2d(out_channel, out_channel, 3, 1, 1)

    def forward(self, x):
        activated = F.relu(self.conv1(x))
        return self.conv2(activated)
class ada_mask(nn.Module):
    """U-Net-style mask predictor: 3-level encoder/decoder with additive
    skip connections, producing a 26-channel output map."""

    def __init__(self, input_channel):
        super(ada_mask, self).__init__()
        # Stem + level-0 blocks (64 channels, full resolution).
        self.mask_head = nn.Conv2d(input_channel, 64, 3, 1, 1)
        self.mask_Res1 = ResBlock(64, 64, 3, 1, 1)
        self.mask_Res2 = ResBlock(64, 64, 3, 1, 1)
        # Encoder: each stride-2 conv halves resolution and doubles channels.
        self.down1 = nn.Conv2d(64, 128, 3, 2, 1)
        self.mask_Res1_1d = ResBlock(128, 128, 3, 1, 1)
        self.mask_Res1_2d = ResBlock(128, 128, 3, 1, 1)
        self.down2 = nn.Conv2d(128, 256, 3, 2, 1)
        self.mask_Res2_1d = ResBlock(256, 256, 3, 1, 1)
        self.mask_Res2_2d = ResBlock(256, 256, 3, 1, 1)
        self.down3 = nn.Conv2d(256, 512, 3, 2, 1)
        self.mask_Res3_1d = ResBlock(512, 512, 3, 1, 1)
        self.mask_Res3_2d = ResBlock(512, 512, 3, 1, 1)
        # Decoder: bilinear upsample, halve channels, add the skip feature.
        self.up3 = nn.UpsamplingBilinear2d(scale_factor=2)
        self.mask_Res3_1u = ResBlock(512, 256, 3, 1, 1)
        self.mask_Res3_2u = ResBlock(256, 256, 3, 1, 1)
        self.up2 = nn.UpsamplingBilinear2d(scale_factor=2)
        self.mask_Res2_1u = ResBlock(256, 128, 3, 1, 1)
        self.mask_Res2_2u = ResBlock(128, 128, 3, 1, 1)
        self.up1 = nn.UpsamplingBilinear2d(scale_factor=2)
        self.mask_Res1_1u = ResBlock(128, 64, 3, 1, 1)
        self.mask_Res1_2u = ResBlock(64, 64, 3, 1, 1)
        # Output head: 26 mask channels.
        self.mask_tail = nn.Conv2d(64, 26, 3, 1, 1)

    def forward(self, input):
        # Encoder path (features kept for the skip connections).
        feat0 = self.mask_Res2(self.mask_Res1(self.mask_head(input)))
        feat1 = self.mask_Res1_2d(self.mask_Res1_1d(self.down1(feat0)))
        feat2 = self.mask_Res2_2d(self.mask_Res2_1d(self.down2(feat1)))
        feat3 = self.mask_Res3_2d(self.mask_Res3_1d(self.down3(feat2)))
        # Decoder path with additive skips.
        dec2 = self.mask_Res3_2u(self.mask_Res3_1u(self.up3(feat3))) + feat2
        dec1 = self.mask_Res2_2u(self.mask_Res2_1u(self.up2(dec2))) + feat1
        dec0 = self.mask_Res1_2u(self.mask_Res1_1u(self.up1(dec1))) + feat0
        return self.mask_tail(dec0)
def get_inputs():
    """Random 8x8 input batch for ada_mask.forward."""
    return [torch.rand([4, 4, 8, 8])]


def get_init_inputs():
    """Constructor arguments: input_channel keyword only."""
    return [[], {'input_channel': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place per-channel bias add after a convolution (64 channels,
    # 64 elements per channel plane at this stage).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 64  # channel index
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place per-channel bias add followed by ReLU (64 channels,
    # 64 elements per channel plane at this stage).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 64  # channel index
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place per-channel bias add (128 channels, 16 elements per channel
    # plane — the 4x4 level of the network).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 128  # channel index
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)
# In-place conv-bias + ReLU epilogue for a (N, 128, 4, 4) activation.
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ReLU: max(x + bias, 0)
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)
# In-place conv-bias epilogue for a (N, 256, 2, 2) activation:
# xindex // 4 % 256 is the channel index (4 spatial positions = 2*2).
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)
# In-place conv-bias + ReLU epilogue for a (N, 256, 2, 2) activation.
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ReLU: max(x + bias, 0)
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)
# In-place conv-bias epilogue for a (N, 512, 1, 1) activation: with a
# single spatial position per channel, xindex % 512 is the channel index.
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, None)
# In-place conv-bias + ReLU epilogue for a (N, 512, 1, 1) activation.
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ReLU: max(x + bias, 0)
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
# Fills a length-2 int64 index table with zeros: the source-index lookup
# for bilinearly upsampling a 1-pixel axis to 2 pixels (the only valid
# source index is 0).
@triton.jit
def triton_poi_fused__to_copy_8(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.full([1], 0, tl.int64)
    tl.store(out_ptr0 + x0, tmp0, xmask)
# Fills a length-2 float32 table with zeros: the interpolation fractions
# for the degenerate 1 -> 2 bilinear upsample (a 1-pixel source has no
# second sample to blend toward, so all fractions are 0).
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_9(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 2
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)
# Fused "conv-bias add + bilinear upsample 1x1 -> 2x2" for the 512-channel
# bottleneck.  in_ptr2 holds the conv output (one value per (n, c)),
# in_ptr3 the per-channel bias; the remaining pointers are the index /
# fraction tables produced by kernels 8 and 9.
# Because the source axis has size 1 the interpolation deltas are
# identically zero (tmp16 = tmp11 - tmp11, tmp24 = tmp19 - tmp19), so the
# kernel effectively broadcasts (conv_value + bias) to all four output
# positions.  Several tl.where index clamps are computed and discarded --
# dead code retained from the generic upsampling template.
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_10(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 2 % 2
    x0 = xindex % 2
    x5 = xindex // 4
    x2 = xindex // 4 % 512
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 1, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    # Dead code: negative-index wrap computed but unused.
    tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    # Dead code: negative-index wrap computed but unused.
    tl.where(tmp7, tmp6, tmp5)
    # conv output + per-channel bias.
    tmp11 = tmp9 + tmp10
    tmp13 = tmp12 + tmp1
    tmp14 = tmp12 < 0
    # Dead code: negative-index wrap computed but unused.
    tl.where(tmp14, tmp13, tmp12)
    # Horizontal lerp degenerates: delta is x - x == 0.
    tmp16 = tmp11 - tmp11
    tmp18 = tmp16 * tmp17
    tmp19 = tmp11 + tmp18
    tmp21 = tmp20 + tmp1
    tmp22 = tmp20 < 0
    # Dead code: negative-index wrap computed but unused.
    tl.where(tmp22, tmp21, tmp20)
    # Vertical lerp degenerates likewise.
    tmp24 = tmp19 - tmp19
    tmp26 = tmp24 * tmp25
    tmp27 = tmp19 + tmp26
    tl.store(in_out_ptr0 + x6, tmp27, None)
# Low source-index table for align_corners bilinear upsampling 2 -> 4:
# floor(i * (2-1)/(4-1)) = floor(i / 3), clamped at 0, for i in [0, 4).
@triton.jit
def triton_poi_fused__to_copy_11(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    # scale = (in_size - 1) / (out_size - 1) = 1/3
    tmp2 = 0.3333333333333333
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)
# High source-index table for the 2 -> 4 upsample:
# min(floor(i / 3) + 1, 1) -- clamped to the last valid source index.
@triton.jit
def triton_poi_fused_add_clamp_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    # scale = (in_size - 1) / (out_size - 1) = 1/3
    tmp2 = 0.3333333333333333
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    # Clamp to in_size - 1 == 1.
    tmp9 = triton_helpers.minimum(tmp8, tmp7)
    tl.store(out_ptr0 + x0, tmp9, xmask)
# Fractional-weight table for the 2 -> 4 upsample:
# clamp(i/3 - floor(i/3), 0, 1) -- the lerp factor between low/high indices.
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_13(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    # scale = (in_size - 1) / (out_size - 1) = 1/3
    tmp2 = 0.3333333333333333
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    # Fraction = position - floor(position), clamped to [0, 1].
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)
# Fused "conv-bias add + element-wise add + bilinear upsample 2x2 -> 4x4"
# for a (N, 256, ., .) decoder stage.  in_ptr2 = conv output, in_ptr3 =
# per-channel bias, in_ptr4 = a second 2x2 tensor added at the same
# gathered positions (per call() this is the encoder feature buf29 --
# presumably the skip connection, added *before* upsampling; confirm
# against the eager forward).  in_ptr0/1/5/6 are the low/high index
# tables, in_ptr7/8 the horizontal/vertical lerp fractions.
# The four corner values are blended horizontally (tmp33, tmp40) and then
# vertically (tmp44).
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_14(in_out_ptr1,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
    in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x6 = xindex // 16
    x2 = xindex // 16 % 256
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 2, tl.int32)
    # Wrap negative indices (unsafe-index convention); tmp4/tmp8 etc. are
    # the resolved row/column source indices.
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    # Corner (low row, low col): conv + bias + skip.
    tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp11 = tmp9 + tmp10
    tmp12 = tl.load(in_ptr4 + (tmp8 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp13 = tmp11 + tmp12
    tmp15 = tmp14 + tmp1
    tmp16 = tmp14 < 0
    tmp17 = tl.where(tmp16, tmp15, tmp14)
    # Corner (high row, low col).
    tmp18 = tl.load(in_ptr2 + (tmp8 + 2 * tmp17 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp19 = tmp18 + tmp10
    tmp20 = tl.load(in_ptr4 + (tmp8 + 2 * tmp17 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp21 = tmp19 + tmp20
    tmp23 = tmp22 + tmp1
    tmp24 = tmp22 < 0
    tmp25 = tl.where(tmp24, tmp23, tmp22)
    # Corner (high row, high col).
    tmp26 = tl.load(in_ptr2 + (tmp25 + 2 * tmp17 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp27 = tmp26 + tmp10
    tmp28 = tl.load(in_ptr4 + (tmp25 + 2 * tmp17 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp29 = tmp27 + tmp28
    # Horizontal lerp along the high row.
    tmp30 = tmp29 - tmp21
    tmp32 = tmp30 * tmp31
    tmp33 = tmp21 + tmp32
    # Corner (low row, high col).
    tmp34 = tl.load(in_ptr2 + (tmp25 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp35 = tmp34 + tmp10
    tmp36 = tl.load(in_ptr4 + (tmp25 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp37 = tmp35 + tmp36
    # Horizontal lerp along the low row.
    tmp38 = tmp37 - tmp13
    tmp39 = tmp38 * tmp31
    tmp40 = tmp13 + tmp39
    # Vertical lerp between the two row results.
    tmp41 = tmp40 - tmp33
    tmp43 = tmp41 * tmp42
    tmp44 = tmp33 + tmp43
    tl.store(in_out_ptr1 + x4, tmp44, None)
# Low source-index table for align_corners bilinear upsampling 4 -> 8:
# floor(i * (4-1)/(8-1)) = floor(i * 3/7), for i in [0, 8).
@triton.jit
def triton_poi_fused__to_copy_15(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    # scale = (in_size - 1) / (out_size - 1) = 3/7
    tmp2 = 0.42857142857142855
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)
# High source-index table for the 4 -> 8 upsample:
# min(floor(i * 3/7) + 1, 3) -- clamped to the last valid source index.
@triton.jit
def triton_poi_fused_add_clamp_16(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    # scale = (in_size - 1) / (out_size - 1) = 3/7
    tmp2 = 0.42857142857142855
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    # Clamp to in_size - 1 == 3.
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)
# Fractional-weight table for the 4 -> 8 upsample:
# clamp(i*3/7 - floor(i*3/7), 0, 1) -- the lerp factor per output position.
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_17(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    # scale = (in_size - 1) / (out_size - 1) = 3/7
    tmp2 = 0.42857142857142855
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    # Fraction = position - floor(position), clamped to [0, 1].
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)
# Fused "conv-bias add + element-wise add + bilinear upsample 4x4 -> 8x8"
# for a (N, 128, ., .) decoder stage.  Same structure as kernel 14:
# in_ptr2 = conv output, in_ptr3 = per-channel bias, in_ptr4 = a second
# 4x4 tensor added at the gathered positions (buf19 in call() --
# presumably the encoder skip, added before upsampling; confirm against
# the eager forward).  Corners are lerped horizontally, then vertically.
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_18(in_out_ptr1,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
    in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x6 = xindex // 64
    x2 = xindex // 64 % 128
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    # Wrap negative indices (unsafe-index convention).
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    # Corner (low row, low col): conv + bias + skip.
    tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp11 = tmp9 + tmp10
    tmp12 = tl.load(in_ptr4 + (tmp8 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp13 = tmp11 + tmp12
    tmp15 = tmp14 + tmp1
    tmp16 = tmp14 < 0
    tmp17 = tl.where(tmp16, tmp15, tmp14)
    # Corner (high row, low col).
    tmp18 = tl.load(in_ptr2 + (tmp8 + 4 * tmp17 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp19 = tmp18 + tmp10
    tmp20 = tl.load(in_ptr4 + (tmp8 + 4 * tmp17 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp21 = tmp19 + tmp20
    tmp23 = tmp22 + tmp1
    tmp24 = tmp22 < 0
    tmp25 = tl.where(tmp24, tmp23, tmp22)
    # Corner (high row, high col).
    tmp26 = tl.load(in_ptr2 + (tmp25 + 4 * tmp17 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp27 = tmp26 + tmp10
    tmp28 = tl.load(in_ptr4 + (tmp25 + 4 * tmp17 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp29 = tmp27 + tmp28
    # Horizontal lerp along the high row.
    tmp30 = tmp29 - tmp21
    tmp32 = tmp30 * tmp31
    tmp33 = tmp21 + tmp32
    # Corner (low row, high col).
    tmp34 = tl.load(in_ptr2 + (tmp25 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp35 = tmp34 + tmp10
    tmp36 = tl.load(in_ptr4 + (tmp25 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp37 = tmp35 + tmp36
    # Horizontal lerp along the low row.
    tmp38 = tmp37 - tmp13
    tmp39 = tmp38 * tmp31
    tmp40 = tmp13 + tmp39
    # Vertical lerp between the two row results.
    tmp41 = tmp40 - tmp33
    tmp43 = tmp41 * tmp42
    tmp44 = tmp33 + tmp43
    tl.store(in_out_ptr1 + x4, tmp44, None)
# In-place conv-bias epilogue plus element-wise add of a second tensor
# (in_ptr1, same shape) for a (N, 64, 8, 8) activation -- the final
# decoder-level skip addition (buf9 in call()).
@triton.jit
def triton_poi_fused_add_convolution_19(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # Result discarded: leftover all-true mask from the masked template.
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x3, tmp4, None)
# In-place conv-bias epilogue for the final (4, 26, 8, 8) output:
# xnumel = 6656 = 4 * 26 * 64.  Unlike the power-of-two kernels above,
# 6656 need not be a multiple of XBLOCK, so loads/stores are masked.
@triton.jit
def triton_poi_fused_convolution_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 6656
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 26
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
    """TorchInductor-generated forward pass for ada_maskNew.

    `args` is the flat list of 67 primals: the conv weights and biases of
    the encoder/decoder interleaved with the network input (primals_3,
    a (4, 4, 8, 8) tensor).  The list is consumed via args.clear().
    Returns a tuple whose first element (buf92) is the (4, 26, 8, 8)
    output; the remaining entries are tensors retained for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25, primals_26, primals_27,
        primals_28, primals_29, primals_30, primals_31, primals_32,
        primals_33, primals_34, primals_35, primals_36, primals_37,
        primals_38, primals_39, primals_40, primals_41, primals_42,
        primals_43, primals_44, primals_45, primals_46, primals_47,
        primals_48, primals_49, primals_50, primals_51, primals_52,
        primals_53, primals_54, primals_55, primals_56, primals_57,
        primals_58, primals_59, primals_60, primals_61, primals_62,
        primals_63, primals_64, primals_65, primals_66, primals_67) = args
    args.clear()
    # Static shape/stride guards for every parameter and the input.
    assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 4, 8, 8), (256, 64, 8, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_9, (64,), (1,))
    assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_11, (64,), (1,))
    assert_size_stride(primals_12, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_13, (128,), (1,))
    assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_15, (128,), (1,))
    assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_17, (128,), (1,))
    assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_19, (128,), (1,))
    assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_21, (128,), (1,))
    assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_23, (256,), (1,))
    assert_size_stride(primals_24, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_25, (256,), (1,))
    assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_27, (256,), (1,))
    assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_29, (256,), (1,))
    assert_size_stride(primals_30, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_31, (256,), (1,))
    assert_size_stride(primals_32, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_33, (512,), (1,))
    assert_size_stride(primals_34, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_35, (512,), (1,))
    assert_size_stride(primals_36, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_37, (512,), (1,))
    assert_size_stride(primals_38, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_39, (512,), (1,))
    assert_size_stride(primals_40, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_41, (512,), (1,))
    assert_size_stride(primals_42, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_43, (256,), (1,))
    assert_size_stride(primals_44, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_45, (256,), (1,))
    assert_size_stride(primals_46, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_47, (256,), (1,))
    assert_size_stride(primals_48, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_49, (256,), (1,))
    assert_size_stride(primals_50, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_51, (128,), (1,))
    assert_size_stride(primals_52, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_53, (128,), (1,))
    assert_size_stride(primals_54, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_55, (128,), (1,))
    assert_size_stride(primals_56, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_57, (128,), (1,))
    assert_size_stride(primals_58, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_59, (64,), (1,))
    assert_size_stride(primals_60, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_61, (64,), (1,))
    assert_size_stride(primals_62, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_63, (64,), (1,))
    assert_size_stride(primals_64, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_65, (64,), (1,))
    assert_size_stride(primals_66, (26, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_67, (26,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # -- Encoder level 0: head conv + conv pairs, 64 ch @ 8x8.
        # Each convolution is followed by an in-place fused bias (+ReLU)
        # Triton epilogue.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16384)](buf1, primals_2, 16384,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_1[grid(16384)](buf3, primals_5,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_0[grid(16384)](buf5, primals_7, 16384,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_1[grid(16384)](buf7, primals_9,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf9 = buf8
        del buf8
        # buf9 is also reused later as the level-0 skip (kernel 19).
        triton_poi_fused_convolution_0[grid(16384)](buf9, primals_11, 16384,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
        # -- Encoder level 1: stride-2 downsample to 128 ch @ 4x4.
        buf10 = extern_kernels.convolution(buf9, primals_12, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_2[grid(8192)](buf11, primals_13, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_13
        buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_relu_3[grid(8192)](buf13, primals_15,
            8192, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_15
        buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf15 = buf14
        del buf14
        triton_poi_fused_convolution_2[grid(8192)](buf15, primals_17, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_17
        buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf17 = buf16
        del buf16
        triton_poi_fused_convolution_relu_3[grid(8192)](buf17, primals_19,
            8192, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_19
        buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf19 = buf18
        del buf18
        # buf19 is also reused later as the level-1 skip (kernel 18).
        triton_poi_fused_convolution_2[grid(8192)](buf19, primals_21, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_21
        # -- Encoder level 2: stride-2 downsample to 256 ch @ 2x2.
        buf20 = extern_kernels.convolution(buf19, primals_22, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf21 = buf20
        del buf20
        triton_poi_fused_convolution_4[grid(4096)](buf21, primals_23, 4096,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_23
        buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf23 = buf22
        del buf22
        triton_poi_fused_convolution_relu_5[grid(4096)](buf23, primals_25,
            4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_25
        buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf25 = buf24
        del buf24
        triton_poi_fused_convolution_4[grid(4096)](buf25, primals_27, 4096,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_27
        buf26 = extern_kernels.convolution(buf25, primals_28, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf27 = buf26
        del buf26
        triton_poi_fused_convolution_relu_5[grid(4096)](buf27, primals_29,
            4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_29
        buf28 = extern_kernels.convolution(buf27, primals_30, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf29 = buf28
        del buf28
        # buf29 is also reused later as the level-2 skip (kernel 14).
        triton_poi_fused_convolution_4[grid(4096)](buf29, primals_31, 4096,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_31
        # -- Bottleneck: stride-2 downsample to 512 ch @ 1x1.
        buf30 = extern_kernels.convolution(buf29, primals_32, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf30, (4, 512, 1, 1), (512, 1, 1, 1))
        buf31 = buf30
        del buf30
        triton_poi_fused_convolution_6[grid(2048)](buf31, primals_33, 2048,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_33
        buf32 = extern_kernels.convolution(buf31, primals_34, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf32, (4, 512, 1, 1), (512, 1, 1, 1))
        buf33 = buf32
        del buf32
        triton_poi_fused_convolution_relu_7[grid(2048)](buf33, primals_35,
            2048, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_35
        buf34 = extern_kernels.convolution(buf33, primals_36, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf34, (4, 512, 1, 1), (512, 1, 1, 1))
        buf35 = buf34
        del buf34
        triton_poi_fused_convolution_6[grid(2048)](buf35, primals_37, 2048,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_37
        buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 512, 1, 1), (512, 1, 1, 1))
        buf37 = buf36
        del buf36
        triton_poi_fused_convolution_relu_7[grid(2048)](buf37, primals_39,
            2048, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_39
        buf38 = extern_kernels.convolution(buf37, primals_40, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 512, 1, 1), (512, 1, 1, 1))
        # -- Upsample 1x1 -> 2x2: zero index/fraction tables (kernels 8/9),
        # then fused bias-add + broadcast (kernel 10).
        buf39 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_8[grid(2)](buf39, 2, XBLOCK=2, num_warps=
            1, num_stages=1)
        buf40 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_8[grid(2)](buf40, 2, XBLOCK=2, num_warps=
            1, num_stages=1)
        buf41 = empty_strided_cuda((2,), (1,), torch.int64)
        triton_poi_fused__to_copy_8[grid(2)](buf41, 2, XBLOCK=2, num_warps=
            1, num_stages=1)
        buf42 = empty_strided_cuda((2,), (1,), torch.int64)
        triton_poi_fused__to_copy_8[grid(2)](buf42, 2, XBLOCK=2, num_warps=
            1, num_stages=1)
        buf43 = empty_strided_cuda((2,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_9[grid(2)](buf43, 2,
            XBLOCK=2, num_warps=1, num_stages=1)
        buf45 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_9[grid(2)](buf45, 2,
            XBLOCK=2, num_warps=1, num_stages=1)
        buf44 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
            float32)
        buf46 = buf44
        del buf44
        triton_poi_fused__unsafe_index_add_convolution_mul_sub_10[grid(8192)](
            buf46, buf39, buf41, buf38, primals_41, buf42, buf43, buf40,
            buf45, 8192, XBLOCK=256, num_warps=4, num_stages=1)
        del buf38
        del primals_41
        # -- Decoder level 2: convs at 256 ch @ 2x2.
        buf47 = extern_kernels.convolution(buf46, primals_42, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf48 = buf47
        del buf47
        triton_poi_fused_convolution_relu_5[grid(4096)](buf48, primals_43,
            4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_43
        buf49 = extern_kernels.convolution(buf48, primals_44, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf49, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf50 = buf49
        del buf49
        triton_poi_fused_convolution_4[grid(4096)](buf50, primals_45, 4096,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_45
        buf51 = extern_kernels.convolution(buf50, primals_46, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 256, 2, 2), (1024, 4, 2, 1))
        buf52 = buf51
        del buf51
        triton_poi_fused_convolution_relu_5[grid(4096)](buf52, primals_47,
            4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_47
        buf53 = extern_kernels.convolution(buf52, primals_48, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 256, 2, 2), (1024, 4, 2, 1))
        # -- Upsample 2x2 -> 4x4: index/fraction tables (kernels 11-13),
        # then fused bias + skip(buf29) + bilinear interp (kernel 14).
        buf54 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_11[grid(4)](buf54, 4, XBLOCK=4, num_warps
            =1, num_stages=1)
        buf55 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_12[grid(4)](buf55, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        buf56 = empty_strided_cuda((4,), (1,), torch.int64)
        triton_poi_fused__to_copy_11[grid(4)](buf56, 4, XBLOCK=4, num_warps
            =1, num_stages=1)
        buf57 = empty_strided_cuda((4,), (1,), torch.int64)
        triton_poi_fused_add_clamp_12[grid(4)](buf57, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        buf60 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_13[grid(4)](buf60, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf62 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_13[grid(4)](buf62, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf59 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
            float32)
        buf63 = buf59
        del buf59
        buf64 = buf63
        del buf63
        triton_poi_fused__unsafe_index_add_convolution_mul_sub_14[grid(16384)](
            buf64, buf55, buf56, buf53, primals_49, buf29, buf54, buf57,
            buf60, buf62, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del buf53
        del primals_49
        # -- Decoder level 1: convs at 128 ch @ 4x4.
        buf65 = extern_kernels.convolution(buf64, primals_50, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf65, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf66 = buf65
        del buf65
        triton_poi_fused_convolution_relu_3[grid(8192)](buf66, primals_51,
            8192, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_51
        buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf67, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf68 = buf67
        del buf67
        triton_poi_fused_convolution_2[grid(8192)](buf68, primals_53, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_53
        buf69 = extern_kernels.convolution(buf68, primals_54, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf69, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf70 = buf69
        del buf69
        triton_poi_fused_convolution_relu_3[grid(8192)](buf70, primals_55,
            8192, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_55
        buf71 = extern_kernels.convolution(buf70, primals_56, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf71, (4, 128, 4, 4), (2048, 16, 4, 1))
        # -- Upsample 4x4 -> 8x8: index/fraction tables (kernels 15-17),
        # then fused bias + skip(buf19) + bilinear interp (kernel 18).
        buf72 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_15[grid(8)](buf72, 8, XBLOCK=8, num_warps
            =1, num_stages=1)
        buf73 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_16[grid(8)](buf73, 8, XBLOCK=8,
            num_warps=1, num_stages=1)
        buf74 = empty_strided_cuda((8,), (1,), torch.int64)
        triton_poi_fused__to_copy_15[grid(8)](buf74, 8, XBLOCK=8, num_warps
            =1, num_stages=1)
        buf75 = empty_strided_cuda((8,), (1,), torch.int64)
        triton_poi_fused_add_clamp_16[grid(8)](buf75, 8, XBLOCK=8,
            num_warps=1, num_stages=1)
        buf78 = empty_strided_cuda((8,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_17[grid(8)](buf78, 8,
            XBLOCK=8, num_warps=1, num_stages=1)
        buf80 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_17[grid(8)](buf80, 8,
            XBLOCK=8, num_warps=1, num_stages=1)
        buf77 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
            float32)
        buf81 = buf77
        del buf77
        buf82 = buf81
        del buf81
        triton_poi_fused__unsafe_index_add_convolution_mul_sub_18[grid(32768)](
            buf82, buf73, buf74, buf71, primals_57, buf19, buf72, buf75,
            buf78, buf80, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del buf71
        del primals_57
        # -- Decoder level 0: convs at 64 ch @ 8x8, plus skip add (buf9).
        buf83 = extern_kernels.convolution(buf82, primals_58, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf83, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf84 = buf83
        del buf83
        triton_poi_fused_convolution_relu_1[grid(16384)](buf84, primals_59,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_59
        buf85 = extern_kernels.convolution(buf84, primals_60, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf85, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf86 = buf85
        del buf85
        triton_poi_fused_convolution_0[grid(16384)](buf86, primals_61,
            16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_61
        buf87 = extern_kernels.convolution(buf86, primals_62, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf87, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf88 = buf87
        del buf87
        triton_poi_fused_convolution_relu_1[grid(16384)](buf88, primals_63,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_63
        buf89 = extern_kernels.convolution(buf88, primals_64, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf89, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf90 = buf89
        del buf89
        triton_poi_fused_add_convolution_19[grid(16384)](buf90, primals_65,
            buf9, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_65
        # -- Tail conv: project to the 26-channel mask output.
        buf91 = extern_kernels.convolution(buf90, primals_66, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf91, (4, 26, 8, 8), (1664, 64, 8, 1))
        buf92 = buf91
        del buf91
        triton_poi_fused_convolution_20[grid(6656)](buf92, primals_67, 6656,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_67
    # Output first; everything else is retained for the backward pass.
    return (buf92, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, primals_12, primals_14, primals_16, primals_18,
        primals_20, primals_22, primals_24, primals_26, primals_28,
        primals_30, primals_32, primals_34, primals_36, primals_38,
        primals_40, primals_42, primals_44, primals_46, primals_48,
        primals_50, primals_52, primals_54, primals_56, primals_58,
        primals_60, primals_62, primals_64, primals_66, buf1, buf3, buf5,
        buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25,
        buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf40, buf41,
        buf42, buf43, buf45, buf46, buf48, buf50, buf52, buf54, buf55,
        buf56, buf57, buf60, buf62, buf64, buf66, buf68, buf70, buf72,
        buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf86, buf88, buf90)
class ResBlock(nn.Module):
def __init__(self, in_channel, out_channel, ker_size, stri, pad):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channel, out_channel, 3, 1, 1)
self.conv2 = nn.Conv2d(out_channel, out_channel, 3, 1, 1)
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
class ada_maskNew(nn.Module):
def __init__(self, input_channel):
super(ada_maskNew, self).__init__()
self.mask_head = nn.Conv2d(input_channel, 64, 3, 1, 1)
self.mask_Res1 = ResBlock(64, 64, 3, 1, 1)
self.mask_Res2 = ResBlock(64, 64, 3, 1, 1)
self.down1 = nn.Conv2d(64, 128, 3, 2, 1)
self.mask_Res1_1d = ResBlock(128, 128, 3, 1, 1)
self.mask_Res1_2d = ResBlock(128, 128, 3, 1, 1)
self.down2 = nn.Conv2d(128, 256, 3, 2, 1)
self.mask_Res2_1d = ResBlock(256, 256, 3, 1, 1)
self.mask_Res2_2d = ResBlock(256, 256, 3, 1, 1)
self.down3 = nn.Conv2d(256, 512, 3, 2, 1)
self.mask_Res3_1d = ResBlock(512, 512, 3, 1, 1)
self.mask_Res3_2d = ResBlock(512, 512, 3, 1, 1)
self.up3 = nn.UpsamplingBilinear2d(scale_factor=2)
self.mask_Res3_1u = ResBlock(512, 256, 3, 1, 1)
self.mask_Res3_2u = ResBlock(256, 256, 3, 1, 1)
self.up2 = nn.UpsamplingBilinear2d(scale_factor=2)
self.mask_Res2_1u = ResBlock(256, 128, 3, 1, 1)
self.mask_Res2_2u = ResBlock(128, 128, 3, 1, 1)
self.up1 = nn.UpsamplingBilinear2d(scale_factor=2)
self.mask_Res1_1u = ResBlock(128, 64, 3, 1, 1)
self.mask_Res1_2u = ResBlock(64, 64, 3, 1, 1)
self.mask_tail = nn.Conv2d(64, 26, 3, 1, 1)
def forward(self, input_0):
primals_1 = self.mask_head.weight
primals_2 = self.mask_head.bias
primals_4 = self.mask_Res1.conv1.weight
primals_5 = self.mask_Res1.conv1.bias
primals_6 = self.mask_Res1.conv2.weight
primals_7 = self.mask_Res1.conv2.bias
primals_8 = self.mask_Res2.conv1.weight
primals_9 = self.mask_Res2.conv1.bias
primals_10 = self.mask_Res2.conv2.weight
primals_11 = self.mask_Res2.conv2.bias
primals_12 = self.down1.weight
primals_13 = self.down1.bias
primals_14 = self.mask_Res1_1d.conv1.weight
primals_15 = self.mask_Res1_1d.conv1.bias
primals_16 = self.mask_Res1_1d.conv2.weight
primals_17 = self.mask_Res1_1d.conv2.bias
primals_18 = self.mask_Res1_2d.conv1.weight
primals_19 = self.mask_Res1_2d.conv1.bias
primals_20 = self.mask_Res1_2d.conv2.weight
primals_21 = self.mask_Res1_2d.conv2.bias
primals_22 = self.down2.weight
primals_23 = self.down2.bias
primals_24 = self.mask_Res2_1d.conv1.weight
primals_25 = self.mask_Res2_1d.conv1.bias
primals_26 = self.mask_Res2_1d.conv2.weight
primals_27 = self.mask_Res2_1d.conv2.bias
primals_28 = self.mask_Res2_2d.conv1.weight
primals_29 = self.mask_Res2_2d.conv1.bias
primals_30 = self.mask_Res2_2d.conv2.weight
primals_31 = self.mask_Res2_2d.conv2.bias
primals_32 = self.down3.weight
primals_33 = self.down3.bias
primals_34 = self.mask_Res3_1d.conv1.weight
primals_35 = self.mask_Res3_1d.conv1.bias
primals_36 = self.mask_Res3_1d.conv2.weight
primals_37 = self.mask_Res3_1d.conv2.bias
primals_38 = self.mask_Res3_2d.conv1.weight
primals_39 = self.mask_Res3_2d.conv1.bias
primals_40 = self.mask_Res3_2d.conv2.weight
primals_41 = self.mask_Res3_2d.conv2.bias
primals_42 = self.mask_Res3_1u.conv1.weight
primals_43 = self.mask_Res3_1u.conv1.bias
primals_44 = self.mask_Res3_1u.conv2.weight
primals_45 = self.mask_Res3_1u.conv2.bias
primals_46 = self.mask_Res3_2u.conv1.weight
primals_47 = self.mask_Res3_2u.conv1.bias
primals_48 = self.mask_Res3_2u.conv2.weight
primals_49 = self.mask_Res3_2u.conv2.bias
primals_50 = self.mask_Res2_1u.conv1.weight
primals_51 = self.mask_Res2_1u.conv1.bias
primals_52 = self.mask_Res2_1u.conv2.weight
primals_53 = self.mask_Res2_1u.conv2.bias
primals_54 = self.mask_Res2_2u.conv1.weight
primals_55 = self.mask_Res2_2u.conv1.bias
primals_56 = self.mask_Res2_2u.conv2.weight
primals_57 = self.mask_Res2_2u.conv2.bias
primals_58 = self.mask_Res1_1u.conv1.weight
primals_59 = self.mask_Res1_1u.conv1.bias
primals_60 = self.mask_Res1_1u.conv2.weight
primals_61 = self.mask_Res1_1u.conv2.bias
primals_62 = self.mask_Res1_2u.conv1.weight
primals_63 = self.mask_Res1_2u.conv1.bias
primals_64 = self.mask_Res1_2u.conv2.weight
primals_65 = self.mask_Res1_2u.conv2.bias
primals_66 = self.mask_tail.weight
primals_67 = self.mask_tail.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67])
return output[0]
|
NJUVISION/AWnet
|
ada_mask
| false
| 8,659
|
[
"MIT"
] | 16
|
f47a1692819a778b513b882d36ed727f7732d37b
|
https://github.com/NJUVISION/AWnet/tree/f47a1692819a778b513b882d36ed727f7732d37b
|
Classify
|
import torch
import torch.nn as nn
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Classify(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else
[x])], 1)
return self.flat(self.conv(z))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf2
triton_poi_fused_convolution_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf3, (4, 4), (4, 1), 0), primals_2, buf1
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class ClassifyNew(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
super(ClassifyNew, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)
self.flat = nn.Flatten()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
PoCInnovation/Koic
|
Classify
| false
| 8,660
|
[
"MIT"
] | 13
|
eca53b53b7242c1e83213ef9408366ca0a346358
|
https://github.com/PoCInnovation/Koic/tree/eca53b53b7242c1e83213ef9408366ca0a346358
|
ClsCriterion
|
import torch
import torch.nn as nn
class ClsCriterion(nn.Module):
def __init__(self):
super(ClsCriterion, self).__init__()
def forward(self, predict, label, batch_weight=None):
"""
:param predict: B*C log_softmax result
:param label: B*C one-hot label
:param batch_weight: B*1 0-1 weight for each item in a batch
:return: cross entropy loss
"""
if batch_weight is None:
cls_loss = -1 * torch.mean(torch.sum(predict * label, dim=1))
else:
cls_loss = -1 * torch.mean(torch.sum(predict * label, dim=1) *
batch_weight)
return cls_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tmp18 = 64.0
tmp19 = tmp17 / tmp18
tmp20 = -1.0
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ClsCriterionNew(nn.Module):
def __init__(self):
super(ClsCriterionNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
PaperCodeSubmission/ICML2020-697
|
ClsCriterion
| false
| 8,661
|
[
"MIT"
] | 12
|
00f7732c236b9c6234e76a47dfebe5de314d5c01
|
https://github.com/PaperCodeSubmission/ICML2020-697/tree/00f7732c236b9c6234e76a47dfebe5de314d5c01
|
IWDiscriminator
|
import torch
from torch import nn
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
super(ConvMeanPool, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init)
def forward(self, input):
output = self.conv(input)
output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output
[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
return output
class MeanPoolConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
super(MeanPoolConv, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init)
def forward(self, input):
output = input
output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output
[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
output = self.conv(output)
return output
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super(DepthToSpace, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, input_height, input_width, input_depth = output.size()
output_depth = int(input_depth / self.block_size_sq)
output_width = int(input_width * self.block_size)
output_height = int(input_height * self.block_size)
t_1 = output.reshape(batch_size, input_height, input_width, self.
block_size_sq, output_depth)
spl = t_1.split(self.block_size, 3)
stacks = [t_t.reshape(batch_size, input_height, output_width,
output_depth) for t_t in spl]
output = torch.stack(stacks, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
).reshape(batch_size, output_height, output_width, output_depth)
output = output.permute(0, 3, 1, 2)
return output
class UpSampleConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
bias=True):
super(UpSampleConv, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init, bias=bias)
self.depth_to_space = DepthToSpace(2)
def forward(self, input):
output = input
output = torch.cat((output, output, output, output), 1)
output = self.depth_to_space(output)
output = self.conv(output)
return output
class ResidualBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, resample=None, hw=64
):
super(ResidualBlock, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.kernel_size = kernel_size
self.resample = resample
self.bn1 = None
self.bn2 = None
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
if resample == 'down':
self.bn1 = nn.LayerNorm([input_dim, hw, hw])
self.bn2 = nn.LayerNorm([input_dim, hw, hw])
elif resample == 'up':
self.bn1 = nn.BatchNorm2d(input_dim)
self.bn2 = nn.BatchNorm2d(output_dim)
elif resample is None:
self.bn1 = nn.BatchNorm2d(output_dim)
self.bn2 = nn.LayerNorm([input_dim, hw, hw])
else:
raise Exception('invalid resample value')
if resample == 'down':
self.conv_shortcut = MeanPoolConv(input_dim, output_dim,
kernel_size=1, he_init=False)
self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
kernel_size, bias=False)
self.conv_2 = ConvMeanPool(input_dim, output_dim, kernel_size=
kernel_size)
elif resample == 'up':
self.conv_shortcut = UpSampleConv(input_dim, output_dim,
kernel_size=1, he_init=False)
self.conv_1 = UpSampleConv(input_dim, output_dim, kernel_size=
kernel_size, bias=False)
self.conv_2 = IWConv2d(output_dim, output_dim, kernel_size=
kernel_size)
elif resample is None:
self.conv_shortcut = IWConv2d(input_dim, output_dim,
kernel_size=1, he_init=False)
self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
kernel_size, bias=False)
self.conv_2 = IWConv2d(input_dim, output_dim, kernel_size=
kernel_size)
else:
raise Exception('invalid resample value')
def forward(self, input):
if self.input_dim == self.output_dim and self.resample is None:
shortcut = input
else:
shortcut = self.conv_shortcut(input)
output = input
output = self.bn1(output)
output = self.relu1(output)
output = self.conv_1(output)
output = self.bn2(output)
output = self.relu2(output)
output = self.conv_2(output)
return shortcut + output
class IWDiscriminator(nn.Module):
def __init__(self, input_size=64, n_image_channels=3):
super(IWDiscriminator, self).__init__()
self.size = input_size
self.n_image_channels = n_image_channels
self.ssize = self.size // 16
self.conv1 = IWConv2d(n_image_channels, self.size, 3, he_init=False)
self.rb1 = ResidualBlock(self.size, 2 * self.size, 3, resample=
'down', hw=self.size)
self.rb2 = ResidualBlock(2 * self.size, 4 * self.size, 3, resample=
'down', hw=int(self.size / 2))
self.rb3 = ResidualBlock(4 * self.size, 8 * self.size, 3, resample=
'down', hw=int(self.size / 4))
self.rb4 = ResidualBlock(8 * self.size, 8 * self.size, 3, resample=
'down', hw=int(self.size / 8))
self.ln1 = nn.Linear(self.ssize * self.ssize * 8 * self.size, 1)
def forward(self, input):
output = input.contiguous()
output = output.view(-1, self.n_image_channels, self.size, self.size)
output = self.conv1(output)
output = self.rb1(output)
output = self.rb2(output)
output = self.rb3(output)
output = self.rb4(output)
output = output.view(-1, self.ssize * self.ssize * 8 * self.size)
output = self.ln1(output)
output = output.view(-1)
return output
def forward_last_feature(self, input):
output = input.contiguous()
output = output.view(-1, self.n_image_channels, self.size, self.size)
output = self.conv1(output)
output = self.rb1(output)
output = self.rb2(output)
output = self.rb3(output)
output = self.rb4(output)
output = output.view(-1, self.ssize * self.ssize * 8 * self.size)
out_features = output
output = self.ln1(output)
output = output.view(-1)
return output, out_features
def forward_all_feature(self, input):
out_features_list = []
output = input.contiguous()
output = output.view(-1, self.n_image_channels, self.size, self.size)
output = self.conv1(output)
out_features_list.append(output)
output = self.rb1(output)
out_features_list.append(output)
output = self.rb2(output)
out_features_list.append(output)
output = self.rb3(output)
out_features_list.append(output)
output = self.rb4(output)
output = output.view(-1, self.ssize * self.ssize * 8 * self.size)
out_features_list.append(output)
output = self.ln1(output)
out_features_list.append(output)
output = output.view(-1)
return output, out_features_list
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_view_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_add_div_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x3, tmp8, None)
@triton.jit
def triton_per_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex % 32
x1 = xindex // 32 % 64
x2 = xindex // 2048
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * (r3 % 64) + 4096 * ((r3 + 128 *
x1) // 64 % 64) + 262144 * x2 + (r3 + 128 * x1) // 4096), None,
eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
tl.store(out_ptr2 + x4, tmp7, None)
@triton.jit
def triton_per_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 128
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 32
x1 = xindex // 32
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + x3, tmp13, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused_native_layer_norm_13(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 32 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 32 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 262144.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_14(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 262144 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2 + 4096 * y0), ymask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x2 + 4096 * y0), ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1, 1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp10, ymask)
@triton.jit
def triton_poi_fused_add_convolution_div_15(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 128
x1 = xindex // 128 % 32
x2 = xindex // 4096
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 256 * x1 + 16384 * x2), None)
tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (8192 + x0 + 256 * x1 + 16384 * x2), None)
tmp9 = tl.load(in_ptr1 + (128 + x0 + 256 * x1 + 16384 * x2), None)
tmp12 = tl.load(in_ptr1 + (8320 + x0 + 256 * x1 + 16384 * x2), None)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp6 + tmp4
tmp8 = tmp5 + tmp7
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tmp13 = tmp12 + tmp4
tmp14 = tmp11 + tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tmp2 + tmp16
tl.store(in_out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_poi_fused_add_div_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 16
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x3, tmp8, None)
# Inductor-generated first-stage layer-norm reduction: for each xindex it
# reduces a 128-element chunk, emitting the chunk mean (out_ptr0), the sum of
# squared deviations from that mean (out_ptr1), and the chunk count 128.0
# (out_ptr2). Later stages merge these partials via a Welford combine.
@triton.jit
def triton_per_fused_native_layer_norm_17(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 128  # fixed chunk size per partial
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r3 = rindex
    x0 = xindex % 16
    x1 = xindex // 16 % 64
    x2 = xindex // 1024
    x4 = xindex
    # Gather-style load: strided/permuted indexing over the source tensor.
    tmp0 = tl.load(in_ptr0 + (8 * x0 + 128 * (r3 % 32) + 4096 * ((r3 + 128 *
        x1) // 32 % 32) + 131072 * x2 + (r3 + 128 * x1) // 1024), None,
        eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]           # chunk sum
    tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
    tmp7 = tmp6.to(tl.float32)                # chunk count (128.0)
    tmp8 = tmp5 / tmp7                        # chunk mean
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]         # chunk M2 (sum of squared deviations)
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)
    tl.store(out_ptr2 + x4, tmp7, None)
# Inductor-generated second-stage layer-norm reduction: merges 64 partial
# (mean, M2, count) triples per output index using a Welford combine and
# stores the merged triple for the next stage.
@triton.jit
def triton_per_fused_native_layer_norm_18(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 64  # number of partials merged per output
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r2 = rindex
    x0 = xindex % 16
    x1 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    # Welford merge along the reduction axis -> (mean, M2, count).
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp15 = tmp12[:, None]
    tl.store(out_ptr0 + x3, tmp13, xmask)
    tl.store(out_ptr1 + x3, tmp14, xmask)
    tl.store(out_ptr2 + x3, tmp15, xmask)
# Inductor-generated final layer-norm stage: merges the last 16 partials,
# stores the mean (out_ptr0) and total M2 (out_ptr1), and also precomputes
# rsqrt(var + 1e-5) / 131072 (out_ptr2) -- 7.62939453125e-06 == 1/131072,
# the per-element scale used by the layer-norm backward pass.
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_19(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]   # merged mean
    tmp14 = tmp11[:, None]   # merged M2
    tmp12[:, None]           # generated no-op: merged count is unused
    tmp16 = 131072.0         # total element count N of the normalized group
    tmp17 = tmp14 / tmp16    # variance = M2 / N
    tmp18 = 1e-05            # layer-norm epsilon
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)   # 1/std
    tmp21 = 7.62939453125e-06        # 1/131072 == 1/N
    tmp22 = tmp20 * tmp21            # rstd/N for the backward kernel
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)
# Inductor-generated pointwise layer-norm normalization: in place,
# x <- (x - mean) * rsqrt(M2/131072 + 1e-5). in_ptr0 holds per-group means,
# in_ptr1 per-group M2 sums; x1 selects the group (131072 elements each).
@triton.jit
def triton_poi_fused_native_layer_norm_20(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # generated no-op mask
    x2 = xindex
    x1 = xindex // 131072  # normalization-group index
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')  # group mean
    tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')  # group M2
    tmp2 = tmp0 - tmp1
    tmp4 = 131072.0          # group size N
    tmp5 = tmp3 / tmp4       # variance = M2 / N
    tmp6 = 1e-05             # epsilon
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8       # normalized value
    tl.store(in_out_ptr0 + x2, tmp9, None)
# Inductor-generated pointwise kernel: out = relu(x * weight + bias) over a
# 2D tile, where weight/bias are broadcast per (x2, y0). Input and output
# share the same (y0 + 128*x2 + 131072*y1) layout -- likely the elementwise
# affine+ReLU tail of a layer norm; confirm against the generating graph.
@triton.jit
def triton_poi_fused_native_layer_norm_relu_21(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 512
    xnumel = 1024
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 131072 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')  # scale term
    tmp3 = tl.load(in_ptr2 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')  # shift term
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)  # ReLU: max(0, x)
    tl.store(out_ptr0 + (y0 + 128 * x2 + 131072 * y1), tmp6, xmask & ymask)
# Inductor-generated final layer-norm stage (variant of stage 19 without the
# backward scale): merges 16 partial (mean, M2, count) triples, then writes
# rsqrt(M2/131072 + 1e-5) into in_out_ptr0 and the mean into out_ptr0.
@triton.jit
def triton_per_fused_native_layer_norm_22(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]   # merged mean
    tmp14 = tmp11[:, None]   # merged M2
    tmp12[:, None]           # generated no-op: merged count is unused
    tmp16 = 131072.0         # group size N
    tmp17 = tmp14 / tmp16    # variance
    tmp18 = 1e-05            # epsilon
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)  # 1/std
    tl.debug_barrier()       # generated barrier before the in/out aliased store
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
# Inductor-generated pointwise kernel: full layer-norm elementwise tail with
# ReLU: out = relu((x - mean) * rstd * weight + bias). mean (in_ptr1) and
# rstd (in_ptr2) are per-y1 (per normalization group); weight/bias
# (in_ptr3/in_ptr4) are per (x2, y0).
@triton.jit
def triton_poi_fused_native_layer_norm_relu_23(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 512
    xnumel = 1024
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 131072 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')  # group mean
    tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')  # group rstd
    tmp5 = tl.load(in_ptr3 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')  # elementwise weight
    tmp7 = tl.load(in_ptr4 + (x2 + 1024 * y0), xmask & ymask,
        eviction_policy='evict_last')  # elementwise bias
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1, 1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)  # ReLU
    tl.store(out_ptr0 + (y0 + 128 * x2 + 131072 * y1), tmp10, xmask & ymask)
# Inductor-generated pointwise kernel: same fusion pattern as
# triton_poi_fused_add_convolution_div_15 at the next scale (256 channels):
# in_out += bias, then adds the mean of four bias-added strided loads
# (offsets 0, 8192, 256, 8448) from in_ptr1.
@triton.jit
def triton_poi_fused_add_convolution_div_24(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # generated no-op mask
    x3 = xindex
    x0 = xindex % 256        # fastest dim (presumably channels)
    x1 = xindex // 256 % 16
    x2 = xindex // 4096
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')  # per-x0 bias
    tmp3 = tl.load(in_ptr1 + (x0 + 512 * x1 + 16384 * x2), None)
    tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')  # bias for in_ptr1
    tmp6 = tl.load(in_ptr1 + (8192 + x0 + 512 * x1 + 16384 * x2), None)
    tmp9 = tl.load(in_ptr1 + (256 + x0 + 512 * x1 + 16384 * x2), None)
    tmp12 = tl.load(in_ptr1 + (8448 + x0 + 512 * x1 + 16384 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tmp6 + tmp4
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp4
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 + tmp4
    tmp14 = tmp11 + tmp13    # sum of the four bias-added loads
    tmp15 = 0.25             # 1/4: average
    tmp16 = tmp14 * tmp15
    tmp17 = tmp2 + tmp16
    tl.store(in_out_ptr0 + x3, tmp17, None)
# Inductor-generated pointwise kernel: mean of four strided loads (offsets
# 0, 4096, 256, 4352) -- the 256-channel counterpart of
# triton_poi_fused_add_div_16.
@triton.jit
def triton_poi_fused_add_div_25(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # generated no-op mask
    x0 = xindex % 256
    x1 = xindex // 256 % 8
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25              # 1/4: mean of the four loads
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x3, tmp8, None)
# Inductor-generated first-stage layer-norm reduction (next-scale variant of
# stage 17): per xindex, reduces a 128-element chunk and stores the chunk
# mean, M2 (sum of squared deviations), and count 128.0 as Welford partials.
@triton.jit
def triton_per_fused_native_layer_norm_26(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 128  # fixed chunk size per partial
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r3 = rindex
    x0 = xindex % 8
    x1 = xindex // 8 % 64
    x2 = xindex // 512
    x4 = xindex
    # Gather-style load: strided/permuted indexing over the source tensor.
    tmp0 = tl.load(in_ptr0 + (32 * x0 + 256 * (r3 % 16) + 4096 * ((r3 + 128 *
        x1) // 16 % 16) + 65536 * x2 + (r3 + 128 * x1) // 256), None,
        eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]            # chunk sum
    tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
    tmp7 = tmp6.to(tl.float32)                 # chunk count (128.0)
    tmp8 = tmp5 / tmp7                         # chunk mean
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]          # chunk M2
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)
    tl.store(out_ptr2 + x4, tmp7, None)
# Inductor-generated second-stage layer-norm reduction: Welford-merges 64
# partial (mean, M2, count) triples per output index (32 outputs total).
@triton.jit
def triton_per_fused_native_layer_norm_27(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    RBLOCK: tl.constexpr = 64  # partials merged per output
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r2 = rindex
    x0 = xindex % 8
    x1 = xindex // 8
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    # Welford merge along the reduction axis -> (mean, M2, count).
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp15 = tmp12[:, None]
    tl.store(out_ptr0 + x3, tmp13, xmask)
    tl.store(out_ptr1 + x3, tmp14, xmask)
    tl.store(out_ptr2 + x3, tmp15, xmask)
# Inductor-generated final layer-norm stage for group size 65536: merges the
# last 8 partials, stores mean and M2, and precomputes
# rsqrt(M2/65536 + 1e-5) / 65536 (1.52587890625e-05 == 1/65536) for the
# layer-norm backward pass.
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_28(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]   # merged mean
    tmp14 = tmp11[:, None]   # merged M2
    tmp12[:, None]           # generated no-op: merged count is unused
    tmp16 = 65536.0          # group size N
    tmp17 = tmp14 / tmp16    # variance
    tmp18 = 1e-05            # epsilon
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)  # 1/std
    tmp21 = 1.52587890625e-05       # 1/65536 == 1/N
    tmp22 = tmp20 * tmp21           # rstd/N for the backward kernel
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)
# Inductor-generated pointwise layer-norm normalization (group size 65536):
# in place, x <- (x - mean) * rsqrt(M2/65536 + 1e-5), group chosen by x1.
@triton.jit
def triton_poi_fused_native_layer_norm_29(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # generated no-op mask
    x2 = xindex
    x1 = xindex // 65536  # normalization-group index
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')  # group mean
    tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')  # group M2
    tmp2 = tmp0 - tmp1
    tmp4 = 65536.0           # group size N
    tmp5 = tmp3 / tmp4       # variance
    tmp6 = 1e-05             # epsilon
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8       # normalized value
    tl.store(in_out_ptr0 + x2, tmp9, None)
# Inductor-generated pointwise kernel: out = relu(x * weight + bias) over a
# 2D tile (256-channel counterpart of triton_poi_fused_native_layer_norm_relu_21).
@triton.jit
def triton_poi_fused_native_layer_norm_relu_30(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # generated no-op y-mask (ynumel exact)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')  # scale term
    tmp3 = tl.load(in_ptr2 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')  # shift term
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)  # ReLU
    tl.store(out_ptr0 + (y0 + 256 * x2 + 65536 * y1), tmp6, xmask)
# Inductor-generated final layer-norm stage (group size 65536): merges 8
# partial (mean, M2, count) triples, writes rsqrt(M2/65536 + 1e-5) into
# in_out_ptr0 and the mean into out_ptr0.
@triton.jit
def triton_per_fused_native_layer_norm_31(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]   # merged mean
    tmp14 = tmp11[:, None]   # merged M2
    tmp12[:, None]           # generated no-op: merged count is unused
    tmp16 = 65536.0          # group size N
    tmp17 = tmp14 / tmp16    # variance
    tmp18 = 1e-05            # epsilon
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)  # 1/std
    tl.debug_barrier()       # generated barrier before the in/out aliased store
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
# Inductor-generated pointwise kernel: full layer-norm tail with ReLU,
# out = relu((x - mean) * rstd * weight + bias); mean/rstd per y1 group,
# weight/bias per (x2, y0). 256-channel counterpart of kernel _relu_23.
@triton.jit
def triton_poi_fused_native_layer_norm_relu_32(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # generated no-op y-mask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last')  # group mean
    tmp3 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last')  # group rstd
    tmp5 = tl.load(in_ptr3 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')  # elementwise weight
    tmp7 = tl.load(in_ptr4 + (x2 + 256 * y0), xmask, eviction_policy=
        'evict_last')  # elementwise bias
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1, 1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)  # ReLU
    tl.store(out_ptr0 + (y0 + 256 * x2 + 65536 * y1), tmp10, xmask)
# Inductor-generated pointwise kernel: 512-channel variant of the fused
# bias-add + four-neighbour-average + residual-add pattern (see kernels
# _div_15 and _div_24); neighbour offsets here are 0, 8192, 512, 8704.
@triton.jit
def triton_poi_fused_add_convolution_div_33(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # generated no-op mask
    x3 = xindex
    x0 = xindex % 512        # fastest dim (presumably channels)
    x1 = xindex // 512 % 8
    x2 = xindex // 4096
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')  # per-x0 bias
    tmp3 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 16384 * x2), None)
    tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')  # bias for in_ptr1
    tmp6 = tl.load(in_ptr1 + (8192 + x0 + 1024 * x1 + 16384 * x2), None)
    tmp9 = tl.load(in_ptr1 + (512 + x0 + 1024 * x1 + 16384 * x2), None)
    tmp12 = tl.load(in_ptr1 + (8704 + x0 + 1024 * x1 + 16384 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tmp6 + tmp4
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp4
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 + tmp4
    tmp14 = tmp11 + tmp13    # sum of the four bias-added loads
    tmp15 = 0.25             # 1/4: average
    tmp16 = tmp14 * tmp15
    tmp17 = tmp2 + tmp16
    tl.store(in_out_ptr0 + x3, tmp17, None)
# Inductor-generated pointwise kernel: mean of four strided loads (offsets
# 0, 4096, 512, 4608) -- 512-channel counterpart of kernels _div_16/_div_25.
@triton.jit
def triton_poi_fused_add_div_34(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # generated no-op mask
    x0 = xindex % 512
    x1 = xindex // 512 % 4
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25              # 1/4: mean of the four loads
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x3, tmp8, None)
# Inductor-generated first-stage layer-norm reduction with x-masking: per
# xindex (1024 total) reduces a 128-element chunk, storing the chunk mean,
# M2 (sum of squared deviations), and count 128.0 as Welford partials.
@triton.jit
def triton_per_fused_native_layer_norm_35(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    RBLOCK: tl.constexpr = 128  # fixed chunk size per partial
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op r-mask
    r2 = rindex
    x0 = xindex % 256
    x1 = xindex // 256
    x3 = xindex
    # Gather-style load: strided/permuted indexing over the source tensor.
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * (r2 % 64) + 32768 * x1 + r2 //
        64), xmask, eviction_policy='evict_last', other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)  # generated no-op: result discarded
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]           # chunk sum
    tmp8 = tl.full([XBLOCK, 1], 128, tl.int32)
    tmp9 = tmp8.to(tl.float32)                # chunk count (128.0)
    tmp10 = tmp7 / tmp9                       # chunk mean
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]         # chunk M2
    tl.store(out_ptr0 + x3, tmp10, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
    tl.store(out_ptr2 + x3, tmp9, xmask)
# Inductor-generated second-stage layer-norm reduction: Welford-merges 64
# partial (mean, M2, count) triples per output index (16 outputs total).
@triton.jit
def triton_per_fused_native_layer_norm_36(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 64  # partials merged per output
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    # Welford merge along the reduction axis -> (mean, M2, count).
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp15 = tmp12[:, None]
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)
    tl.store(out_ptr2 + x0, tmp15, xmask)
# Inductor-generated final layer-norm stage for group size 32768: merges the
# last 4 partials, stores mean and M2, and precomputes
# rsqrt(M2/32768 + 1e-5) / 32768 (3.0517578125e-05 == 1/32768) for the
# layer-norm backward pass.
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_37(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]   # merged mean
    tmp14 = tmp11[:, None]   # merged M2
    tmp12[:, None]           # generated no-op: merged count is unused
    tmp16 = 32768.0          # group size N
    tmp17 = tmp14 / tmp16    # variance
    tmp18 = 1e-05            # epsilon
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)  # 1/std
    tmp21 = 3.0517578125e-05        # 1/32768 == 1/N
    tmp22 = tmp20 * tmp21           # rstd/N for the backward kernel
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)
# Inductor-generated pointwise layer-norm normalization (group size 32768):
# in place, x <- (x - mean) * rsqrt(M2/32768 + 1e-5), group chosen by x1.
@triton.jit
def triton_poi_fused_native_layer_norm_38(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # generated no-op mask
    x2 = xindex
    x1 = xindex // 32768  # normalization-group index
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')  # group mean
    tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')  # group M2
    tmp2 = tmp0 - tmp1
    tmp4 = 32768.0           # group size N
    tmp5 = tmp3 / tmp4       # variance
    tmp6 = 1e-05             # epsilon
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8       # normalized value
    tl.store(in_out_ptr0 + x2, tmp9, None)
# Inductor-generated pointwise kernel: out = relu(x * weight + bias) over a
# 2D tile (512-channel counterpart of kernels _relu_21/_relu_30).
@triton.jit
def triton_poi_fused_native_layer_norm_relu_39(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # generated no-op y-mask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')  # scale term
    tmp3 = tl.load(in_ptr2 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')  # shift term
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)  # ReLU
    tl.store(out_ptr0 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask)
# Inductor-generated final layer-norm stage (group size 32768): merges 4
# partial (mean, M2, count) triples, writes rsqrt(M2/32768 + 1e-5) into
# in_out_ptr0 and the mean into out_ptr0.
@triton.jit
def triton_per_fused_native_layer_norm_40(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # generated no-op mask
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)  # partial means
    tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)  # partial M2s
    tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)  # partial counts
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]   # merged mean
    tmp14 = tmp11[:, None]   # merged M2
    tmp12[:, None]           # generated no-op: merged count is unused
    tmp16 = 32768.0          # group size N
    tmp17 = tmp14 / tmp16    # variance
    tmp18 = 1e-05            # epsilon
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)  # 1/std
    tl.debug_barrier()       # generated barrier before the in/out aliased store
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
# Inductor-generated pointwise kernel: full layer-norm tail with ReLU,
# out = relu((x - mean) * rstd * weight + bias); mean/rstd per y1 group,
# weight/bias per (x2, y0). 512-channel counterpart of kernels _relu_23/_relu_32.
@triton.jit
def triton_poi_fused_native_layer_norm_relu_41(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # generated no-op y-mask
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last')  # group mean
    tmp3 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last')  # group rstd
    tmp5 = tl.load(in_ptr3 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')  # elementwise weight
    tmp7 = tl.load(in_ptr4 + (x2 + 64 * y0), xmask, eviction_policy=
        'evict_last')  # elementwise bias
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1, 1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)  # ReLU
    tl.store(out_ptr0 + (y0 + 512 * x2 + 32768 * y1), tmp10, xmask)
# Inductor-generated 2D-tiled kernel: same fused bias-add +
# four-neighbour-average + residual-add as kernels _div_15/_24/_33, but
# reading from a non-in-place source (in_ptr0) and writing the result into
# out_ptr0 with a transposed layout (y6 + 16*x3 + 8192*y2).
@triton.jit
def triton_poi_fused_add_convolution_div_42(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
    constexpr):
    ynumel = 64
    xnumel = 512
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x3 = xindex
    y4 = yindex
    y0 = yindex % 4
    y5 = yindex // 4
    y2 = yindex // 16
    y6 = yindex % 16
    tmp0 = tl.load(in_ptr0 + (x3 + 512 * y4), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')  # per-x3 bias
    # Four neighbouring elements of in_ptr2 (offsets 0, 4096, 512, 4608).
    tmp3 = tl.load(in_ptr2 + (x3 + 1024 * y0 + 8192 * y5), xmask & ymask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')  # bias for in_ptr2
    tmp6 = tl.load(in_ptr2 + (4096 + x3 + 1024 * y0 + 8192 * y5), xmask &
        ymask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (512 + x3 + 1024 * y0 + 8192 * y5), xmask &
        ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + (4608 + x3 + 1024 * y0 + 8192 * y5), xmask &
        ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tmp6 + tmp4
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp4
    tmp11 = tmp8 + tmp10
    tmp13 = tmp12 + tmp4
    tmp14 = tmp11 + tmp13    # sum of the four bias-added loads
    tmp15 = 0.25             # 1/4: average
    tmp16 = tmp14 * tmp15
    tmp17 = tmp2 + tmp16
    tl.store(out_ptr0 + (y6 + 16 * x3 + 8192 * y2), tmp17, xmask & ymask)
def call(args):
    """Inductor-generated forward pass of the IW discriminator.

    Consumes the 41 flattened inputs (input image plus every conv /
    LayerNorm / Linear weight and bias), runs fused Triton kernels and
    external convolutions on CUDA device 0, and returns the (4,) logits
    followed by every intermediate buffer the backward pass will need.
    Buffer names (bufNN) and the aggressive `del` / buffer-reuse pattern
    are produced by the compiler and are order-sensitive.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25, primals_26, primals_27,
        primals_28, primals_29, primals_30, primals_31, primals_32,
        primals_33, primals_34, primals_35, primals_36, primals_37,
        primals_38, primals_39, primals_40, primals_41) = args
    args.clear()
    # Shape/stride guards for every input tensor.
    assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (128, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_7, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_9, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_10, (64, 64, 64), (4096, 64, 1))
    assert_size_stride(primals_11, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_12, (128,), (1,))
    assert_size_stride(primals_13, (256, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_14, (256,), (1,))
    assert_size_stride(primals_15, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_16, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_17, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_18, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_19, (128, 32, 32), (1024, 32, 1))
    assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_21, (256,), (1,))
    assert_size_stride(primals_22, (512, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_23, (512,), (1,))
    assert_size_stride(primals_24, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_25, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_27, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_28, (256, 16, 16), (256, 16, 1))
    assert_size_stride(primals_29, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_30, (512,), (1,))
    assert_size_stride(primals_31, (512, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_32, (512,), (1,))
    assert_size_stride(primals_33, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_34, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_35, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_36, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_37, (512, 8, 8), (64, 8, 1))
    assert_size_stride(primals_38, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_39, (512,), (1,))
    assert_size_stride(primals_40, (1, 8192), (8192, 1))
    assert_size_stride(primals_41, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- Re-layout all conv weights into channels-last-style strides. ---
        buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(192, 9)](primals_2, buf0, 192, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
            float32)
        triton_poi_fused_1[grid(4096, 9)](primals_8, buf1, 4096, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
            .float32)
        triton_poi_fused_2[grid(8192, 9)](primals_11, buf2, 8192, 9, XBLOCK
            =16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_11
        buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_3[grid(16384, 9)](primals_17, buf3, 16384, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_17
        buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_4[grid(32768, 9)](primals_20, buf4, 32768, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_20
        buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_5[grid(65536, 9)](primals_26, buf5, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_26
        buf6 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_6[grid(131072, 9)](primals_29, buf6, 131072, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_29
        buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_7[grid(262144, 9)](primals_35, buf7, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_35
        buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_7[grid(262144, 9)](primals_38, buf8, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_38
        # --- Stem: re-layout input image, conv1, bias add. ---
        buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
            .float32)
        triton_poi_fused_view_8[grid(12, 4096)](primals_1, buf9, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_1
        buf10 = extern_kernels.convolution(buf9, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_9[grid(1048576)](buf11, primals_3,
            1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_3
        # --- Residual block 1 (64 -> 128, downsample 64x64 -> 32x32). ---
        buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.float32)
        triton_poi_fused_add_div_10[grid(262144)](buf11, buf12, 262144,
            XBLOCK=512, num_warps=8, num_stages=1)
        buf13 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 128, 32, 32), (131072, 1, 4096, 128))
        # Three-stage Welford-style LayerNorm reduction over (C, H, W).
        buf14 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192,
            8192, 1, 32), torch.float32)
        buf15 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192,
            8192, 1, 32), torch.float32)
        buf16 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192,
            8192, 1, 32), torch.float32)
        triton_per_fused_native_layer_norm_11[grid(8192)](buf11, buf14,
            buf15, buf16, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf17 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1),
            torch.float32)
        buf18 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1),
            torch.float32)
        buf19 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_12[grid(128)](buf14, buf15,
            buf16, buf17, buf18, buf19, 128, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        buf20 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf21 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf23 = reinterpret_tensor(buf21, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf21
        triton_per_fused_native_layer_norm_13[grid(4)](buf23, buf17, buf18,
            buf19, buf20, 4, 32, XBLOCK=1, num_warps=2, num_stages=1)
        buf24 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_14[grid(256, 4096)](buf11,
            buf20, buf23, primals_6, primals_7, buf24, 256, 4096, XBLOCK=32,
            YBLOCK=32, num_warps=4, num_stages=1)
        del primals_7
        buf25 = extern_kernels.convolution(buf24, buf1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf25, (4, 64, 64, 64), (262144, 1, 4096, 64))
        # Second LayerNorm of rb1 reuses the reduction scratch buffers.
        buf26 = buf16
        del buf16
        buf27 = buf15
        del buf15
        buf28 = buf14
        del buf14
        triton_per_fused_native_layer_norm_11[grid(8192)](buf25, buf26,
            buf27, buf28, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf29 = buf19
        del buf19
        buf30 = buf18
        del buf18
        buf31 = buf17
        del buf17
        triton_per_fused_native_layer_norm_12[grid(128)](buf26, buf27,
            buf28, buf29, buf30, buf31, 128, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del buf26
        del buf27
        del buf28
        buf32 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf33 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf35 = reinterpret_tensor(buf33, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf33
        triton_per_fused_native_layer_norm_13[grid(4)](buf35, buf29, buf30,
            buf31, buf32, 4, 32, XBLOCK=1, num_warps=2, num_stages=1)
        del buf29
        del buf30
        del buf31
        buf36 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_14[grid(256, 4096)](buf25,
            buf32, buf35, primals_9, primals_10, buf36, 256, 4096, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_10
        buf37 = extern_kernels.convolution(buf36, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 128, 64, 64), (524288, 1, 8192, 128))
        # Fuse shortcut + mean-pooled branch into the reused shortcut buffer.
        buf38 = buf13
        del buf13
        triton_poi_fused_add_convolution_div_15[grid(524288)](buf38,
            primals_5, buf37, primals_12, 524288, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf37
        del primals_12
        del primals_5
        # --- Residual block 2 (128 -> 256, 32x32 -> 16x16). ---
        buf39 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
            torch.float32)
        triton_poi_fused_add_div_16[grid(131072)](buf38, buf39, 131072,
            XBLOCK=512, num_warps=8, num_stages=1)
        buf40 = extern_kernels.convolution(buf39, primals_13, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf40, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf41 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096,
            4096, 1, 16), torch.float32)
        buf42 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096,
            4096, 1, 16), torch.float32)
        buf43 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096,
            4096, 1, 16), torch.float32)
        triton_per_fused_native_layer_norm_17[grid(4096)](buf38, buf41,
            buf42, buf43, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf44 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1),
            torch.float32)
        buf45 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1),
            torch.float32)
        buf46 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_18[grid(64)](buf41, buf42, buf43,
            buf44, buf45, buf46, 64, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf47 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf48 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf124 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        triton_per_fused_native_layer_norm_native_layer_norm_backward_19[grid
            (4)](buf44, buf45, buf46, buf47, buf48, buf124, 4, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf50 = buf38
        del buf38
        triton_poi_fused_native_layer_norm_20[grid(524288)](buf50, buf47,
            buf48, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        buf51 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_21[grid(512, 1024)](buf50,
            primals_15, primals_16, buf51, 512, 1024, XBLOCK=32, YBLOCK=32,
            num_warps=4, num_stages=1)
        del primals_16
        buf52 = extern_kernels.convolution(buf51, buf3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf52, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf53 = buf43
        del buf43
        buf54 = buf42
        del buf42
        buf55 = buf41
        del buf41
        triton_per_fused_native_layer_norm_17[grid(4096)](buf52, buf53,
            buf54, buf55, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf56 = buf46
        del buf46
        buf57 = buf45
        del buf45
        buf58 = buf44
        del buf44
        triton_per_fused_native_layer_norm_18[grid(64)](buf53, buf54, buf55,
            buf56, buf57, buf58, 64, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf53
        del buf54
        del buf55
        buf59 = reinterpret_tensor(buf48, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf48
        buf60 = buf47
        del buf47
        buf62 = reinterpret_tensor(buf60, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf60
        triton_per_fused_native_layer_norm_22[grid(4)](buf62, buf56, buf57,
            buf58, buf59, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf56
        del buf57
        del buf58
        buf63 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_23[grid(512, 1024)](buf52,
            buf59, buf62, primals_18, primals_19, buf63, 512, 1024, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_19
        buf64 = extern_kernels.convolution(buf63, buf4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf64, (4, 256, 32, 32), (262144, 1, 8192, 256))
        buf65 = buf40
        del buf40
        triton_poi_fused_add_convolution_div_24[grid(262144)](buf65,
            primals_14, buf64, primals_21, 262144, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf64
        del primals_14
        del primals_21
        # --- Residual block 3 (256 -> 512, 16x16 -> 8x8). ---
        buf66 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
            torch.float32)
        triton_poi_fused_add_div_25[grid(65536)](buf65, buf66, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf67 = extern_kernels.convolution(buf66, primals_22, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf67, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf68 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048,
            2048, 1, 8), torch.float32)
        buf69 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048,
            2048, 1, 8), torch.float32)
        buf70 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048,
            2048, 1, 8), torch.float32)
        triton_per_fused_native_layer_norm_26[grid(2048)](buf65, buf68,
            buf69, buf70, 2048, 128, XBLOCK=32, num_warps=8, num_stages=1)
        buf71 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1),
            torch.float32)
        buf72 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1),
            torch.float32)
        buf73 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_27[grid(32)](buf68, buf69, buf70,
            buf71, buf72, buf73, 32, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf74 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf75 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf123 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        triton_per_fused_native_layer_norm_native_layer_norm_backward_28[grid
            (4)](buf71, buf72, buf73, buf74, buf75, buf123, 4, 8, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf77 = buf65
        del buf65
        triton_poi_fused_native_layer_norm_29[grid(262144)](buf77, buf74,
            buf75, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        buf78 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_30[grid(1024, 256)](buf77,
            primals_24, primals_25, buf78, 1024, 256, XBLOCK=32, YBLOCK=32,
            num_warps=4, num_stages=1)
        del primals_25
        buf79 = extern_kernels.convolution(buf78, buf5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf79, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf80 = buf70
        del buf70
        buf81 = buf69
        del buf69
        buf82 = buf68
        del buf68
        triton_per_fused_native_layer_norm_26[grid(2048)](buf79, buf80,
            buf81, buf82, 2048, 128, XBLOCK=32, num_warps=8, num_stages=1)
        buf83 = buf73
        del buf73
        buf84 = buf72
        del buf72
        buf85 = buf71
        del buf71
        triton_per_fused_native_layer_norm_27[grid(32)](buf80, buf81, buf82,
            buf83, buf84, buf85, 32, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf80
        del buf81
        del buf82
        buf86 = reinterpret_tensor(buf75, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf75
        buf87 = buf74
        del buf74
        buf89 = reinterpret_tensor(buf87, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf87
        triton_per_fused_native_layer_norm_31[grid(4)](buf89, buf83, buf84,
            buf85, buf86, 4, 8, XBLOCK=1, num_warps=2, num_stages=1)
        del buf83
        del buf84
        del buf85
        buf90 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_32[grid(1024, 256)](buf79,
            buf86, buf89, primals_27, primals_28, buf90, 1024, 256, XBLOCK=
            32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_28
        buf91 = extern_kernels.convolution(buf90, buf6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf91, (4, 512, 16, 16), (131072, 1, 8192, 512))
        buf92 = buf67
        del buf67
        triton_poi_fused_add_convolution_div_33[grid(131072)](buf92,
            primals_23, buf91, primals_30, 131072, XBLOCK=512, num_warps=8,
            num_stages=1)
        del buf91
        del primals_23
        del primals_30
        # --- Residual block 4 (512 -> 512, 8x8 -> 4x4). ---
        buf93 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.float32)
        triton_poi_fused_add_div_34[grid(32768)](buf92, buf93, 32768,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf94 = extern_kernels.convolution(buf93, primals_31, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf94, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf95 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024,
            1024, 64, 1), torch.float32)
        buf96 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024,
            1024, 64, 1), torch.float32)
        buf97 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024,
            1024, 64, 1), torch.float32)
        triton_per_fused_native_layer_norm_35[grid(1024)](buf92, buf95,
            buf96, buf97, 1024, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf98 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1),
            torch.float32)
        buf99 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1),
            torch.float32)
        buf100 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1),
            torch.float32)
        triton_per_fused_native_layer_norm_36[grid(16)](buf95, buf96, buf97,
            buf98, buf99, buf100, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
        buf101 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf102 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf122 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        triton_per_fused_native_layer_norm_native_layer_norm_backward_37[grid
            (4)](buf98, buf99, buf100, buf101, buf102, buf122, 4, 4, XBLOCK
            =1, num_warps=2, num_stages=1)
        buf104 = buf92
        del buf92
        triton_poi_fused_native_layer_norm_38[grid(131072)](buf104, buf101,
            buf102, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf105 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_39[grid(2048, 64)](buf104,
            primals_33, primals_34, buf105, 2048, 64, XBLOCK=32, YBLOCK=32,
            num_warps=4, num_stages=1)
        del primals_34
        buf106 = extern_kernels.convolution(buf105, buf7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf106, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf107 = buf97
        del buf97
        buf108 = buf96
        del buf96
        buf109 = buf95
        del buf95
        triton_per_fused_native_layer_norm_35[grid(1024)](buf106, buf107,
            buf108, buf109, 1024, 128, XBLOCK=8, num_warps=8, num_stages=1)
        buf110 = buf99
        del buf99
        buf111 = buf98
        del buf98
        buf112 = buf100
        del buf100
        triton_per_fused_native_layer_norm_36[grid(16)](buf107, buf108,
            buf109, buf110, buf111, buf112, 16, 64, XBLOCK=8, num_warps=4,
            num_stages=1)
        del buf107
        del buf108
        del buf109
        buf113 = reinterpret_tensor(buf102, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf102
        buf114 = buf101
        del buf101
        buf116 = reinterpret_tensor(buf114, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf114
        triton_per_fused_native_layer_norm_40[grid(4)](buf116, buf110,
            buf111, buf112, buf113, 4, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf110
        del buf111
        del buf112
        buf117 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
            torch.float32)
        triton_poi_fused_native_layer_norm_relu_41[grid(2048, 64)](buf106,
            buf113, buf116, primals_36, primals_37, buf117, 2048, 64,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_37
        buf118 = extern_kernels.convolution(buf117, buf8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf118, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf119 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch
            .float32)
        triton_poi_fused_add_convolution_div_42[grid(64, 512)](buf94,
            primals_32, buf118, primals_39, buf119, 64, 512, XBLOCK=4,
            YBLOCK=64, num_warps=4, num_stages=1)
        del buf118
        del buf94
        del primals_32
        del primals_39
        # --- Final linear head: logits = ln1(flatten(buf119)). ---
        buf121 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_41, reinterpret_tensor(buf119, (4,
            8192), (8192, 1), 0), reinterpret_tensor(primals_40, (8192, 1),
            (1, 8192), 0), alpha=1, beta=1, out=buf121)
        del primals_41
    # Logits first, then every tensor the generated backward pass reads.
    return (reinterpret_tensor(buf121, (4,), (1,), 0), buf0, primals_4,
        primals_6, buf1, primals_9, buf2, primals_13, primals_15, buf3,
        primals_18, buf4, primals_22, primals_24, buf5, primals_27, buf6,
        primals_31, primals_33, buf7, primals_36, buf8, buf9, buf11, buf12,
        buf20, buf23, buf24, buf25, buf32, buf35, buf36, buf39, buf50,
        buf51, buf52, buf59, buf62, buf63, buf66, buf77, buf78, buf79,
        buf86, buf89, buf90, buf93, buf104, buf105, buf106, buf113, buf116,
        buf117, reinterpret_tensor(buf119, (4, 8192), (8192, 1), 0),
        primals_40, buf122, buf123, buf124)
class IWConv2d(nn.Module):
    """'Same'-padded Conv2d wrapper used throughout the IW discriminator.

    Bug fix: the ``stride`` argument was previously accepted but silently
    ignored (the underlying ``nn.Conv2d`` was hard-coded to ``stride=1``);
    it is now forwarded. The default (``stride=1``) preserves the original
    behaviour for all existing callers, none of which pass a stride.

    Args:
        input_dim: number of input channels.
        output_dim: number of output channels.
        kernel_size: square kernel size (odd sizes give exact 'same' padding).
        he_init: retained flag consumed by external weight-init code; unused
            inside this module.
        stride: convolution stride (now honoured).
        bias: whether the convolution has a bias term.
    """

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
        stride=1, bias=True):
        super(IWConv2d, self).__init__()
        self.he_init = he_init
        # 'same' padding for odd kernel sizes.
        self.padding = int((kernel_size - 1) / 2)
        # FIX: forward `stride` instead of the previous hard-coded 1.
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=
            stride, padding=self.padding, bias=bias)

    def forward(self, input):
        """Apply the wrapped convolution to ``input`` (N, C, H, W)."""
        output = self.conv(input)
        return output
class ConvMeanPool(nn.Module):
    """Convolution followed by 2x2 mean pooling (halves spatial resolution)."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
        super(ConvMeanPool, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size,
            he_init=self.he_init)

    def forward(self, input):
        conved = self.conv(input)
        # Average each non-overlapping 2x2 spatial window.
        pooled = (conved[:, :, ::2, ::2] + conved[:, :, 1::2, ::2]
                  + conved[:, :, ::2, 1::2] + conved[:, :, 1::2, 1::2]) / 4
        return pooled
class MeanPoolConv(nn.Module):
    """2x2 mean pooling followed by a convolution (halves spatial resolution)."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
        super(MeanPoolConv, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size,
            he_init=self.he_init)

    def forward(self, input):
        # Average each non-overlapping 2x2 spatial window, then convolve.
        pooled = (input[:, :, ::2, ::2] + input[:, :, 1::2, ::2]
                  + input[:, :, ::2, 1::2] + input[:, :, 1::2, 1::2]) / 4
        return self.conv(pooled)
class DepthToSpace(nn.Module):
    """Rearrange blocks of channels into spatial positions.

    Turns (N, C, H, W) into (N, C / b^2, H * b, W * b) for block size b,
    using an explicit split/stack/permute sequence.
    """

    def __init__(self, block_size):
        super(DepthToSpace, self).__init__()
        self.block_size = block_size
        self.block_size_sq = block_size * block_size

    def forward(self, input):
        # Work in channels-last layout.
        nhwc = input.permute(0, 2, 3, 1)
        batch, in_h, in_w, in_d = nhwc.size()
        out_d = int(in_d / self.block_size_sq)
        out_w = int(in_w * self.block_size)
        out_h = int(in_h * self.block_size)
        # Expose the b*b channel groups as their own axis.
        grouped = nhwc.reshape(batch, in_h, in_w, self.block_size_sq, out_d)
        # Each split widens one row band of the output.
        rows = [chunk.reshape(batch, in_h, out_w, out_d)
                for chunk in grouped.split(self.block_size, 3)]
        interleaved = (torch.stack(rows, 0)
                       .transpose(0, 1)
                       .permute(0, 2, 1, 3, 4)
                       .reshape(batch, out_h, out_w, out_d))
        # Back to channels-first.
        return interleaved.permute(0, 3, 1, 2)
class UpSampleConv(nn.Module):
    """2x nearest-neighbour upsampling (via channel tiling + depth-to-space)
    followed by a convolution."""

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
        bias=True):
        super(UpSampleConv, self).__init__()
        self.he_init = he_init
        self.conv = IWConv2d(input_dim, output_dim, kernel_size,
            he_init=self.he_init, bias=bias)
        self.depth_to_space = DepthToSpace(2)

    def forward(self, input):
        # Replicating the input 4x along channels and shuffling depth to
        # space reproduces each pixel in a 2x2 block (nearest upsample).
        tiled = torch.cat((input, input, input, input), 1)
        upsampled = self.depth_to_space(tiled)
        return self.conv(upsampled)
class ResidualBlock(nn.Module):
    """Pre-activation residual block with optional resampling.

    resample='down' uses LayerNorm + mean-pool convolutions, 'up' uses
    BatchNorm + depth-to-space upsampling, and None keeps the resolution.
    ``hw`` is the input spatial side, needed to size the LayerNorms.
    """

    def __init__(self, input_dim, output_dim, kernel_size, resample=None, hw=64
        ):
        super(ResidualBlock, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.kernel_size = kernel_size
        self.resample = resample
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        # Single dispatch on `resample`; submodule assignment order matches
        # the original (relu1, relu2, bn1, bn2, conv_shortcut, conv_1, conv_2)
        # so parameter iteration order is unchanged.
        if resample == 'down':
            self.bn1 = nn.LayerNorm([input_dim, hw, hw])
            self.bn2 = nn.LayerNorm([input_dim, hw, hw])
            self.conv_shortcut = MeanPoolConv(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = ConvMeanPool(input_dim, output_dim, kernel_size=
                kernel_size)
        elif resample == 'up':
            self.bn1 = nn.BatchNorm2d(input_dim)
            self.bn2 = nn.BatchNorm2d(output_dim)
            self.conv_shortcut = UpSampleConv(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = UpSampleConv(input_dim, output_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = IWConv2d(output_dim, output_dim, kernel_size=
                kernel_size)
        elif resample is None:
            self.bn1 = nn.BatchNorm2d(output_dim)
            self.bn2 = nn.LayerNorm([input_dim, hw, hw])
            self.conv_shortcut = IWConv2d(input_dim, output_dim,
                kernel_size=1, he_init=False)
            self.conv_1 = IWConv2d(input_dim, input_dim, kernel_size=
                kernel_size, bias=False)
            self.conv_2 = IWConv2d(input_dim, output_dim, kernel_size=
                kernel_size)
        else:
            raise Exception('invalid resample value')

    def forward(self, input):
        # Identity shortcut only when no channel or resolution change.
        if self.input_dim == self.output_dim and self.resample is None:
            shortcut = input
        else:
            shortcut = self.conv_shortcut(input)
        branch = self.conv_1(self.relu1(self.bn1(input)))
        branch = self.conv_2(self.relu2(self.bn2(branch)))
        return shortcut + branch
class IWDiscriminatorNew(nn.Module):
    """IW (WGAN-GP style) discriminator whose ``forward`` dispatches to the
    inductor-compiled ``call`` kernel pipeline.

    Architecture: a stem conv followed by four 'down' residual blocks
    (64 -> 128 -> 256 -> 512 -> 512 channels, each halving the spatial side)
    and a final linear layer producing one logit per sample.

    NOTE(review): ``forward`` assumes input_size=64 / n_image_channels=3 —
    the compiled ``call`` hard-asserts those shapes; other constructor
    arguments would only work through the eager ``forward_*`` helpers.
    """

    def __init__(self, input_size=64, n_image_channels=3):
        super(IWDiscriminatorNew, self).__init__()
        self.size = input_size
        self.n_image_channels = n_image_channels
        # Spatial side after four 2x downsamples.
        self.ssize = self.size // 16
        self.conv1 = IWConv2d(n_image_channels, self.size, 3, he_init=False)
        self.rb1 = ResidualBlock(self.size, 2 * self.size, 3, resample=
            'down', hw=self.size)
        self.rb2 = ResidualBlock(2 * self.size, 4 * self.size, 3, resample=
            'down', hw=int(self.size / 2))
        self.rb3 = ResidualBlock(4 * self.size, 8 * self.size, 3, resample=
            'down', hw=int(self.size / 4))
        self.rb4 = ResidualBlock(8 * self.size, 8 * self.size, 3, resample=
            'down', hw=int(self.size / 8))
        self.ln1 = nn.Linear(self.ssize * self.ssize * 8 * self.size, 1)

    def forward_last_feature(self, input):
        """Eager forward; returns (logits, flattened pre-linear features)."""
        output = input.contiguous()
        output = output.view(-1, self.n_image_channels, self.size, self.size)
        output = self.conv1(output)
        output = self.rb1(output)
        output = self.rb2(output)
        output = self.rb3(output)
        output = self.rb4(output)
        output = output.view(-1, self.ssize * self.ssize * 8 * self.size)
        out_features = output
        output = self.ln1(output)
        output = output.view(-1)
        return output, out_features

    def forward_all_feature(self, input):
        """Eager forward; returns (logits, list of every stage's features)."""
        out_features_list = []
        output = input.contiguous()
        output = output.view(-1, self.n_image_channels, self.size, self.size)
        output = self.conv1(output)
        out_features_list.append(output)
        output = self.rb1(output)
        out_features_list.append(output)
        output = self.rb2(output)
        out_features_list.append(output)
        output = self.rb3(output)
        out_features_list.append(output)
        output = self.rb4(output)
        output = output.view(-1, self.ssize * self.ssize * 8 * self.size)
        out_features_list.append(output)
        output = self.ln1(output)
        out_features_list.append(output)
        output = output.view(-1)
        return output, out_features_list

    def forward(self, input_0):
        """Compiled forward: flatten all parameters into the positional
        `primals_*` layout expected by the generated ``call`` and return
        the (batch,) logits (first element of ``call``'s output tuple).
        The numbering must match ``call``'s argument order exactly."""
        primals_2 = self.conv1.conv.weight
        primals_3 = self.conv1.conv.bias
        primals_6 = self.rb1.bn1.weight
        primals_7 = self.rb1.bn1.bias
        primals_9 = self.rb1.bn2.weight
        primals_10 = self.rb1.bn2.bias
        primals_4 = self.rb1.conv_shortcut.conv.conv.weight
        primals_5 = self.rb1.conv_shortcut.conv.conv.bias
        primals_8 = self.rb1.conv_1.conv.weight
        primals_11 = self.rb1.conv_2.conv.conv.weight
        primals_12 = self.rb1.conv_2.conv.conv.bias
        primals_15 = self.rb2.bn1.weight
        primals_16 = self.rb2.bn1.bias
        primals_18 = self.rb2.bn2.weight
        primals_19 = self.rb2.bn2.bias
        primals_13 = self.rb2.conv_shortcut.conv.conv.weight
        primals_14 = self.rb2.conv_shortcut.conv.conv.bias
        primals_17 = self.rb2.conv_1.conv.weight
        primals_20 = self.rb2.conv_2.conv.conv.weight
        primals_21 = self.rb2.conv_2.conv.conv.bias
        primals_24 = self.rb3.bn1.weight
        primals_25 = self.rb3.bn1.bias
        primals_27 = self.rb3.bn2.weight
        primals_28 = self.rb3.bn2.bias
        primals_22 = self.rb3.conv_shortcut.conv.conv.weight
        primals_23 = self.rb3.conv_shortcut.conv.conv.bias
        primals_26 = self.rb3.conv_1.conv.weight
        primals_29 = self.rb3.conv_2.conv.conv.weight
        primals_30 = self.rb3.conv_2.conv.conv.bias
        primals_33 = self.rb4.bn1.weight
        primals_34 = self.rb4.bn1.bias
        primals_36 = self.rb4.bn2.weight
        primals_37 = self.rb4.bn2.bias
        primals_31 = self.rb4.conv_shortcut.conv.conv.weight
        primals_32 = self.rb4.conv_shortcut.conv.conv.bias
        primals_35 = self.rb4.conv_1.conv.weight
        primals_38 = self.rb4.conv_2.conv.conv.weight
        primals_39 = self.rb4.conv_2.conv.conv.bias
        primals_40 = self.ln1.weight
        primals_41 = self.ln1.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41])
        return output[0]
|
MIC-DKFZ/mood
|
IWDiscriminator
| false
| 8,662
|
[
"Apache-2.0"
] | 42
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
https://github.com/MIC-DKFZ/mood/tree/a01303adb4256653b133e2f7cd4741d366b681f7
|
MetaAconC
|
import torch
import torch.nn as nn
class MetaAconC(nn.Module):
    """ ACON activation (activate or not).
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.

    Args:
        c1: number of input channels.
        k, s: kernel size and stride of the beta-generating convolutions.
        r: bottleneck reduction ratio for the beta network.
    """

    def __init__(self, c1, k=1, s=1, r=16):
        super().__init__()
        # Hidden width of the beta-generating bottleneck (never below r).
        c2 = max(r, c1 // r)
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)

    def forward(self, x):
        # Global average pool in one reduction. The original chained
        # mean(dim=2).mean(dim=3) with the non-canonical `keepdims` alias;
        # a joint mean over equal-sized groups is mathematically identical
        # and avoids the intermediate tensor.
        y = x.mean(dim=(2, 3), keepdim=True)
        # Per-(sample, channel) switching factor in (0, 1).
        beta = torch.sigmoid(self.fc2(self.fc1(y)))
        dpx = (self.p1 - self.p2) * x
        return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
def get_inputs():
    """Sample forward inputs for the harness: one random 4x4x4x4 tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args for the harness: no positionals, c1=4 keyword."""
    positional, keyword = [], {'c1': 4}
    return [positional, keyword]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Fused global average pool for a (4, 4, 4, 4) NCHW tensor.

    Each of the 16 (batch, channel) pairs owns a contiguous 16-element
    spatial tile; the kernel averages each height column first, then the
    four column means — matching mean(dim=2).mean(dim=3) in the eager
    MetaAconC.forward.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Column w=0 across the four rows (stride 4 within the 4x4 tile).
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Column w=1.
    tmp9 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp14 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Column w=2.
    tmp18 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Column w=3.
    tmp27 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp28 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp30 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Per-column means over the 4 rows, then the mean of the 4 columns.
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp16 = tmp15 / tmp7
    tmp17 = tmp8 + tmp16
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp24 = tmp22 + tmp23
    tmp25 = tmp24 / tmp7
    tmp26 = tmp17 + tmp25
    tmp29 = tmp27 + tmp28
    tmp31 = tmp29 + tmp30
    tmp33 = tmp31 + tmp32
    tmp34 = tmp33 / tmp7
    tmp35 = tmp26 + tmp34
    tmp36 = tmp35 / tmp7
    tl.store(out_ptr0 + x0, tmp36, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias add after the fc1 convolution: adds the 16-element
    # per-channel bias (in_ptr0) to every element of the (4, 16, 1, 1)
    # output held in in_out_ptr0.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16  # channel index (spatial extent is 1x1)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias add after the fc2 convolution: adds the 4-element
    # per-channel bias (in_ptr0) to the (4, 4, 1, 1) output in in_out_ptr0.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # channel index (spatial extent is 1x1)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise p1 - p2 over the 4 channel entries, producing the
    # (p1 - p2) factor reused by the final activation kernel.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Final MetaAconC activation over the (4, 4, 4, 4) input:
    #   d    = (p1 - p2)[c] * x          (in_ptr0 = p1 - p2, in_ptr1 = x)
    #   beta = sigmoid(fc2_out[b, c])    (in_ptr2 = fc2 output, pre-sigmoid)
    #   out  = d * sigmoid(beta * d) + p2[c] * x   (in_ptr3 = p2)
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4  # channel index
    x3 = xindex            # flat element index
    x4 = xindex // 16      # (batch, channel) index for the beta logits
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tl.sigmoid(tmp3)
    tmp5 = tmp4 * tmp2
    tmp6 = tl.sigmoid(tmp5)
    tmp7 = tmp2 * tmp6
    tmp9 = tmp8 * tmp1
    tmp10 = tmp7 + tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
    """Compiled MetaAconC forward for a (4, 4, 4, 4) input.

    args: (x, fc1.weight, fc1.bias, fc2.weight, fc2.bias, p1, p2).
    Pipeline: spatial mean -> fc1 (1x1 conv) -> fc2 (1x1 conv) yields the
    per-(batch, channel) beta logits; the fused kernel then evaluates
    (p1-p2)*x * sigmoid(sigmoid(beta)*(p1-p2)*x) + p2*x.
    Returns the activation plus tensors saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (4, 16, 1, 1), (16, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Global average pool: (4, 4, 4, 4) -> (4, 4, 1, 1).
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # fc1: 1x1 convolution 4 -> 16 channels; bias added in-place below.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 16, 1, 1), (16, 1, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        # fc2: 1x1 convolution 16 -> 4 channels (beta logits); bias fused.
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_2[grid(16)](buf4, primals_5, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        # Per-channel p1 - p2.
        buf5 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        triton_poi_fused_sub_3[grid(4)](primals_6, primals_7, buf5, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_6
        # Fused activation producing the final output buffer.
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_4[grid(256)](buf5, primals_1, buf4,
            primals_7, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
    return buf6, primals_1, primals_2, primals_4, buf0, buf2, buf4, buf5
class MetaAconCNew(nn.Module):
    """ACON activation (activate or not), inductor-compiled variant.

    MetaAconC: (p1*x - p2*x) * sigmoid(beta * (p1*x - p2*x)) + p2*x, where
    beta is produced by a small two-layer 1x1-conv network, following
    "Activate or Not: Learning Customized Activation"
    (https://arxiv.org/pdf/2009.04759.pdf).
    """

    def __init__(self, c1, k=1, s=1, r=16):
        super().__init__()
        hidden = max(r, c1 // r)
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, hidden, k, s, bias=True)
        self.fc2 = nn.Conv2d(hidden, c1, k, s, bias=True)

    def forward(self, input_0):
        # Tensors in the positional order (primals_1..7) the compiled
        # `call` graph expects.
        args = [
            input_0,           # primals_1
            self.fc1.weight,   # primals_2
            self.fc1.bias,     # primals_3
            self.fc2.weight,   # primals_4
            self.fc2.bias,     # primals_5
            self.p1,           # primals_6
            self.p2,           # primals_7
        ]
        return call(args)[0]
|
PoCInnovation/Koic
|
MetaAconC
| false
| 8,663
|
[
"MIT"
] | 13
|
eca53b53b7242c1e83213ef9408366ca0a346358
|
https://github.com/PoCInnovation/Koic/tree/eca53b53b7242c1e83213ef9408366ca0a346358
|
ConvBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """Two conv+ReLU+maxpool stages, flattened to (N, 16*5*5) features."""

    def __init__(self):
        super(ConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

    def forward(self, x):
        # Stage 1: (N, 3, 32, 32) -> (N, 6, 14, 14).
        out = self.pool(F.relu(self.conv1(x)))
        # Stage 2: -> (N, 16, 5, 5).
        out = self.pool(F.relu(self.conv2(out)))
        # Flatten each sample to a 400-dim feature vector.
        return out.view(-1, 16 * 5 * 5)
def get_inputs():
    """Sample forward arguments: one random (4, 3, 32, 32) image batch."""
    batch = torch.rand([4, 3, 32, 32])
    return [batch]


def get_init_inputs():
    """Constructor arguments: no positionals, no keywords."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on the conv1 output (4, 6, 28, 28);
    # in_ptr0 holds the 6-element per-channel bias.
    xnumel = 18816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 6  # channel index (28*28 = 784 spatial positions)
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool of (4, 6, 28, 28) -> (4, 6, 14, 14).
    # out_ptr0 receives the pooled values and out_ptr1 the int8 index
    # (0..3) of the winning element within each window; both output
    # buffers are over-allocated (per-batch strides 1184 / 1280), hence
    # the padded store offsets.
    xnumel = 4704
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14     # output column
    x3 = xindex // 14    # fused (batch, channel, output row)
    x2 = xindex // 1176  # batch index
    x4 = xindex % 1176   # offset within one batch image
    # The four elements of the 2x2 input window (row stride 28).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    # Running window maximum.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Matching argmax (index of the last strictly-greater element).
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias add + ReLU on the conv2 output (4, 16, 10, 10);
    # in_ptr0 holds the 16-element per-channel bias.
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 100 % 16  # channel index (10*10 spatial positions)
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pool of (4, 16, 10, 10) -> (4, 16, 5, 5).
    # Note the outputs are swapped relative to the first pooling kernel:
    # out_ptr0 gets the int8 argmax within each window, out_ptr1 the
    # pooled values.
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5   # output column
    x1 = xindex // 5  # fused (batch, channel, output row)
    x2 = xindex
    # The four elements of the 2x2 input window (row stride 10).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    # Interleaved running argmax / running maximum.
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)
def call(args):
    """Compiled ConvBlock forward for a (4, 3, 32, 32) input.

    args: (conv1.weight, conv1.bias, x, conv2.weight, conv2.bias).
    conv -> fused bias+ReLU -> maxpool, twice; the final (4, 16, 5, 5)
    values are reinterpreted as (4, 400).  Also returns tensors saved for
    backward (inputs, weights, activations, pool indices).
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
    assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
    assert_size_stride(primals_5, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1: (4, 3, 32, 32) -> (4, 6, 28, 28); bias fused below.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
            18816, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # maxpool -> (4, 6, 14, 14): buf2 values, buf3 int8 indices.
        buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
            .float32)
        buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
            .int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
            buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
        # conv2: (4, 6, 14, 14) -> (4, 16, 10, 10); bias fused below.
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
            6400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        # maxpool -> (4, 16, 5, 5): buf6 indices, buf7 values.
        buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
            )
        triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
            buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
    return reinterpret_tensor(buf7, (4, 400), (400, 1), 0
        ), primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6
class ConvBlockNew(nn.Module):
    """Inductor-compiled ConvBlock: conv/ReLU/pool twice, flattened to (N, 400)."""

    def __init__(self):
        super(ConvBlockNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

    def forward(self, input_0):
        # Weights and input in the positional order the compiled `call`
        # graph expects (primals_1..5).
        args = [
            self.conv1.weight,  # primals_1
            self.conv1.bias,    # primals_2
            input_0,            # primals_3
            self.conv2.weight,  # primals_4
            self.conv2.bias,    # primals_5
        ]
        return call(args)[0]
|
QinbinLi/FedKT
|
ConvBlock
| false
| 8,664
|
[
"MIT"
] | 14
|
0bb9a89ea266c057990a4a326b586ed3d2fb2df8
|
https://github.com/QinbinLi/FedKT/tree/0bb9a89ea266c057990a4a326b586ed3d2fb2df8
|
FixupResidual
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class FixupResidual(nn.Module):
    """Fixup-initialized residual block: two 3x3 convs with learned scalar
    biases/scale, added back onto relu(x).

    conv1 is scaled down by 1/sqrt(num_residual); conv2 starts at zero so
    the block is initially the identity on relu(x).
    """

    def __init__(self, depth, num_residual):
        super().__init__()
        self.conv1 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)
        self.conv2 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)
        init_scale = 1 / math.sqrt(num_residual)
        for p in self.conv1.parameters():
            p.data.mul_(init_scale)
        for p in self.conv2.parameters():
            p.data.zero_()
        self.bias1 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.bias2 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.bias3 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.bias4 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.scale = nn.Parameter(torch.ones([depth, 1, 1]))

    def forward(self, x):
        x = F.relu(x)
        branch = self.conv1(x + self.bias1)
        branch = F.relu(branch + self.bias2)
        branch = self.conv2(branch + self.bias3)
        return branch * self.scale + self.bias4 + x
def get_inputs():
    """Sample forward arguments: one random (4, 1, 64, 64) batch."""
    sample = torch.rand([4, 1, 64, 64])
    return [sample]


def get_init_inputs():
    """Constructor arguments: depth=1, num_residual=4."""
    return [[], {'depth': 1, 'num_residual': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # relu(x) + bias1, where bias1 is a single scalar (depth == 1)
    # broadcast over all elements; loads/stores use no bounds mask since
    # the launch covers exactly 16384 elements.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp3 = tl.load(in_ptr1 + 0)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp5 = tmp2 + tmp4
    tl.store(out_ptr0 + x0, tmp5, None)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # out_ptr0 = relu(conv1_out + bias2) + bias3, with bias2/bias3 scalars
    # (depth == 1).  out_ptr1 records (relu output <= 0), the boolean mask
    # the ReLU backward ("threshold_backward") needs.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp6 = tl.load(in_ptr2 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp8 = tmp5 + tmp7
    tmp9 = 0.0
    tmp10 = tmp5 <= tmp9
    tl.store(out_ptr0 + x0, tmp8, None)
    tl.store(out_ptr1 + x0, tmp10, None)
@triton.jit
def triton_poi_fused_add_mul_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Final residual combine: conv2_out * scale + bias4 + relu(x), where
    # scale and bias4 are scalars (depth == 1) and in_ptr3 is the block's
    # original input.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr3 + x0, None)
    tmp3 = tmp0 * tmp2
    tmp6 = tmp3 + tmp5
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tmp6 + tmp9
    tl.store(out_ptr0 + x0, tmp10, None)
def call(args):
    """Compiled FixupResidual forward for a (4, 1, 64, 64) input.

    args: (x, bias1, conv1.weight, bias2, bias3, conv2.weight, bias4, scale).
    Returns the residual output plus tensors saved for backward, including
    the ReLU mask buf5.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_2, (1, 1, 1), (1, 1, 1))
    assert_size_stride(primals_3, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_4, (1, 1, 1), (1, 1, 1))
    assert_size_stride(primals_5, (1, 1, 1), (1, 1, 1))
    assert_size_stride(primals_6, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_7, (1, 1, 1), (1, 1, 1))
    assert_size_stride(primals_8, (1, 1, 1), (1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = relu(x) + bias1.
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_relu_0[grid(16384)](primals_1, primals_2, buf0,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        # conv1: 3x3, padding 1, no bias.
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
        # buf2 = relu(conv1 + bias2) + bias3; buf5 = ReLU backward mask.
        buf2 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
            torch.float32)
        buf5 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_add_relu_threshold_backward_1[grid(16384)](buf1,
            primals_4, primals_5, buf2, buf5, 16384, XBLOCK=256, num_warps=
            4, num_stages=1)
        del primals_4
        del primals_5
        # conv2: 3x3, padding 1, no bias.
        buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 64, 64), (4096, 4096, 64, 1))
        # buf4 = conv2*scale + bias4 + relu(x), reusing buf1's storage.
        buf4 = buf1
        del buf1
        triton_poi_fused_add_mul_relu_2[grid(16384)](buf3, primals_7,
            primals_8, primals_1, buf4, 16384, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_1
        del primals_8
    return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5
class FixupResidualNew(nn.Module):
    """Inductor-compiled Fixup residual block (see FixupResidual)."""

    def __init__(self, depth, num_residual):
        super().__init__()
        self.conv1 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)
        self.conv2 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)
        init_scale = 1 / math.sqrt(num_residual)
        for p in self.conv1.parameters():
            p.data.mul_(init_scale)
        for p in self.conv2.parameters():
            p.data.zero_()
        self.bias1 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.bias2 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.bias3 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.bias4 = nn.Parameter(torch.zeros([depth, 1, 1]))
        self.scale = nn.Parameter(torch.ones([depth, 1, 1]))

    def forward(self, input_0):
        # Positional order matches the compiled `call` signature.
        args = [
            input_0,            # primals_1
            self.bias1,         # primals_2
            self.conv1.weight,  # primals_3
            self.bias2,         # primals_4
            self.bias3,         # primals_5
            self.conv2.weight,  # primals_6
            self.bias4,         # primals_7
            self.scale,         # primals_8
        ]
        return call(args)[0]
|
PacktPublishing/Hands-On-Reinforcement-Learning-for-Games
|
FixupResidual
| false
| 8,665
|
[
"MIT"
] | 41
|
045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
|
https://github.com/PacktPublishing/Hands-On-Reinforcement-Learning-for-Games/tree/045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
|
MaxPooling
|
import torch
class MaxPooling(torch.nn.Module):
    """Elementwise maximum of two equally-shaped tensors."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        # Stack along a new dim-1 and reduce it with max; equivalent to
        # cat of the unsqueezed tensors followed by max(dim=1).
        stacked = torch.stack((x, y), dim=1)
        return stacked.max(dim=1)[0]
def get_inputs():
    """Two random (4, 4, 4, 4) tensors for MaxPooling.forward."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]


def get_init_inputs():
    """Constructor arguments: no positionals, no keywords."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise maximum of two 256-element tensors.  The integer
    # comparisons are the lowered cat(dim=1).max(dim=1) selection: slot 0
    # always reads in_ptr0 and slot 1 always reads in_ptr1, so the kernel
    # reduces to maximum(in_ptr0[i], in_ptr1[i]).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.full([1], 0, tl.int64)
    tmp2 = tl.full([1], 1, tl.int64)
    tmp3 = tmp0 < tmp2
    tmp4 = tl.load(in_ptr0 + x0, tmp3 & xmask, other=0.0)
    tmp5 = tmp0 >= tmp2
    tl.full([1], 2, tl.int64)
    tmp8 = tl.load(in_ptr1 + x0, tmp5 & xmask, other=0.0)
    tmp9 = tl.where(tmp3, tmp4, tmp8)
    tmp11 = tmp2 < tmp2
    tmp12 = tl.load(in_ptr0 + x0, tmp11 & xmask, other=0.0)
    tmp13 = tmp2 >= tmp2
    tmp15 = tl.load(in_ptr1 + x0, tmp13 & xmask, other=0.0)
    tmp16 = tl.where(tmp11, tmp12, tmp15)
    tmp17 = triton_helpers.maximum(tmp9, tmp16)
    tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
    """Compiled MaxPooling forward: elementwise max of two (4, 4, 4, 4)
    tensors, produced by a single fused kernel launch.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
            =128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,
class MaxPoolingNew(torch.nn.Module):
    """Inductor-compiled elementwise maximum of two tensors."""

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        result = call([input_0, input_1])
        return result[0]
|
Qualcomm-AI-research/FrameExit
|
MaxPooling
| false
| 8,666
|
[
"BSD-3-Clause-Clear"
] | 21
|
fc5815fd092019d58bcac5d5e6fcc45ce666311f
|
https://github.com/Qualcomm-AI-research/FrameExit/tree/fc5815fd092019d58bcac5d5e6fcc45ce666311f
|
KLNormCriterion
|
import torch
import torch.nn as nn
class KLNormCriterion(nn.Module):
    """KL divergence between diagonal Gaussians, averaged over the batch.

    With no target distribution: KL[N(mu, sigma) || N(0, I)].
    With (z_mean_gt, z_sigma_gt): KL[N(mu, sigma) || N(mu_gt, sigma_gt)].
    """

    def __init__(self):
        super(KLNormCriterion, self).__init__()

    def forward(self, z_mean_pre, z_log_sigma_pre, z_mean_gt=None,
        z_sigma_gt=None):
        batch = z_mean_pre.size(0)
        if z_mean_gt is None or z_sigma_gt is None:
            # KL[N(mu, sigma) || N(0, I)]
            log_var = 2 * z_log_sigma_pre
            total = torch.sum(z_mean_pre * z_mean_pre + torch.exp(log_var)
                - log_var - 1)
            return 0.5 * total / batch
        # KL[N(mu_pre, sigma_pre) || N(mu_gt, sigma_gt)]
        log_var_pre = 2 * z_log_sigma_pre
        var_pre = torch.exp(log_var_pre)
        # Small epsilon keeps the log finite for near-zero target sigmas.
        log_var_gt = 2 * torch.log(z_sigma_gt + 0.0001)
        var_gt = z_sigma_gt ** 2
        diff_sq = (z_mean_pre - z_mean_gt) ** 2
        total = torch.sum(log_var_gt - log_var_pre + var_pre / var_gt +
            diff_sq / var_gt - 1)
        return 0.5 * total / batch
def get_inputs():
    """Random (mu, log_sigma) pair, each of shape (4, 4, 4, 4)."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]


def get_init_inputs():
    """Constructor arguments: none."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_mul_sub_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    # Single-program reduction over all 256 elements computing
    #   0.5 * sum(mu^2 + exp(2*ls) - 2*ls - 1) * 0.25
    # i.e. KL[N(mu, exp(ls)) || N(0, I)] divided by the batch size 4.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp1 = tmp0 * tmp0
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp1 + tmp5
    tmp7 = tmp6 - tmp4
    tmp8 = 1.0
    tmp9 = tmp7 - tmp8
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 0.5
    tmp14 = tmp12 * tmp13
    tmp15 = 0.25  # 1 / batch_size
    tmp16 = tmp14 * tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
    """Compiled KLNormCriterion forward (standard-normal-prior branch only):
    0.5 * sum(mu^2 + exp(2*log_sigma) - 2*log_sigma - 1) / 4 as a scalar.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-d output buffer written in place by the reduction kernel.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_exp_mul_sub_sum_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class KLNormCriterionNew(nn.Module):
    """Inductor-compiled KL criterion.

    Only the two-argument path (KL against the N(0, I) prior) is compiled
    into `call`; the target-distribution branch is not available here.
    """

    def __init__(self):
        super(KLNormCriterionNew, self).__init__()

    def forward(self, input_0, input_1):
        return call([input_0, input_1])[0]
|
PaperCodeSubmission/ICML2020-697
|
KLNormCriterion
| false
| 8,667
|
[
"MIT"
] | 12
|
00f7732c236b9c6234e76a47dfebe5de314d5c01
|
https://github.com/PaperCodeSubmission/ICML2020-697/tree/00f7732c236b9c6234e76a47dfebe5de314d5c01
|
QNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init_(m):
    """Initialize Linear layers: Xavier-uniform weight, zero bias.

    Modules that are not nn.Linear are left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    torch.nn.init.xavier_uniform_(m.weight, gain=1)
    torch.nn.init.constant_(m.bias, 0)
class QNetwork(nn.Module):
    """Twin Q-network: two independent 3-layer MLP heads over
    cat([state, action], dim=1).

    Args:
        num_inputs: state dimensionality.
        num_actions: action dimensionality.
        hidden_dim: width of the hidden layers.

    Returns (q1, q2), a pair of (batch, 1) value estimates.
    """

    def __init__(self, num_inputs, num_actions, hidden_dim):
        super(QNetwork, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear5 = nn.Linear(hidden_dim, hidden_dim)
        self.linear6 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, state, action):
        """Compute both Q estimates for a batch of (state, action) pairs.

        The original implementation split `action` into six contiguous
        column groups (widths 2, 3, 3, 2, 2, 2) and immediately
        re-concatenated them; that round trip is exactly `action[:, :14]`
        (a no-op for actions with at most 14 columns), so it is written
        directly here.
        """
        a = action[:, :2 + 3 + 3 + 2 + 2 + 2]
        xu = torch.cat([state, a], 1)
        # Head 1.
        x1 = F.relu(self.linear1(xu))
        x1 = F.relu(self.linear2(x1))
        x1 = self.linear3(x1)
        # Head 2: independent weights over the same input.
        x2 = F.relu(self.linear4(xu))
        x2 = F.relu(self.linear5(x2))
        x2 = self.linear6(x2)
        return x1, x2
def get_inputs():
    """Random (state, action) batch, each of shape (4, 4)."""
    return [torch.rand([4, 4]) for _ in range(2)]


def get_init_inputs():
    """Constructor arguments for QNetwork."""
    return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Builds a (4, 8) buffer: columns 0..3 copied from in_ptr0, columns
    # 4..7 from in_ptr1.  The nested predicates are the lowered six-way
    # slice/re-cat of the action tensor; with only 4 columns they collapse
    # to copying in_ptr1's columns 0..1 and then 2..3 unchanged.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8   # output column
    x1 = xindex // 8  # row
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = -4 + x0
    tmp11 = tl.full([1], 2, tl.int64)
    tmp12 = tmp9 < tmp11
    tmp13 = tmp12 & tmp6
    tmp14 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp13 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp15 = tmp9 >= tmp11
    tmp17 = tmp15 & tmp6
    tmp18 = tl.load(in_ptr1 + (2 + 4 * x1 + (-2 + (-4 + x0))), tmp17 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp19 = tl.where(tmp12, tmp14, tmp18)
    tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
    tmp21 = tl.where(tmp6, tmp19, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias add + ReLU on a (4, 4) matmul output; in_ptr0 is the
    # 4-element bias indexed by output feature.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # output-feature index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Compiled QNetwork forward: two independent 3-layer MLP heads.

    Builds xu = cat([primals_2, primals_1], dim=1) once, then each head
    runs mm -> fused bias+ReLU -> mm -> fused bias+ReLU -> addmm down to a
    (4, 1) output.  Returns (q1, q2) plus tensors saved for backward.

    NOTE(review): the primal numbering here does not match a clean
    weight/input split -- QNetworkNew binds the two inputs to primals_5 /
    primals_11 yet those are used as mm weight matrices below, while the
    cat consumes primals_2 / primals_1 (bound to linear5/linear2 weights).
    This only runs because every tensor involved is (4, 4); confirm
    against the original trace before reusing.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (1, 4), (4, 1))
    assert_size_stride(primals_8, (1,), (1,))
    assert_size_stride(primals_9, (4, 8), (8, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (1, 4), (4, 1))
    assert_size_stride(primals_14, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Shared input: (4, 8) concatenation of the two (4, 4) operands.
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_2, primals_1, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        # Head 1: mm -> bias+ReLU -> mm -> bias+ReLU -> addmm.
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
            ), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
            ), 0), out=buf3)
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_6
        buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
        del primals_8
        # Head 2: same structure with its own weights.
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 4), (1, 8
            ), 0), out=buf7)
        del primals_9
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_1[grid(16)](buf8, primals_10, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_10
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (4, 4), (1,
            4), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_relu_1[grid(16)](buf10, primals_12, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_12
        buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
            primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12)
        del primals_14
    return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
        primals_11, primals_7, primals_5)
def weights_init_(m):
    """Xavier-uniform init for nn.Linear weights; biases set to zero."""
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight, gain=1)
        nn.init.constant_(m.bias, 0)
class QNetworkNew(nn.Module):
    """Inductor-compiled twin Q-network (see QNetwork).

    NOTE(review): the primal binding below (inputs as primals_5 /
    primals_11, used inside `call` as weight matrices) looks inconsistent
    with the roles implied by the kernels; it runs only because every
    tensor is (4, 4) -- confirm against the original trace.
    """

    def __init__(self, num_inputs, num_actions, hidden_dim):
        super(QNetworkNew, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear5 = nn.Linear(hidden_dim, hidden_dim)
        self.linear6 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, input_0, input_1):
        # Arguments in the positional order of the compiled `call`
        # signature (primals_1 .. primals_14).
        args = [
            self.linear2.weight,  # primals_1
            self.linear5.weight,  # primals_2
            self.linear1.weight,  # primals_3
            self.linear1.bias,    # primals_4
            input_0,              # primals_5
            self.linear2.bias,    # primals_6
            self.linear3.weight,  # primals_7
            self.linear3.bias,    # primals_8
            self.linear4.weight,  # primals_9
            self.linear4.bias,    # primals_10
            input_1,              # primals_11
            self.linear5.bias,    # primals_12
            self.linear6.weight,  # primals_13
            self.linear6.bias,    # primals_14
        ]
        out = call(args)
        return out[0], out[1]
|
QwQ2000/E2GAN
|
QNetwork
| false
| 8,668
|
[
"MIT"
] | 34
|
f27b715362de4459129206217d100ae5b6cf82c8
|
https://github.com/QwQ2000/E2GAN/tree/f27b715362de4459129206217d100ae5b6cf82c8
|
FixedSubnetConv
|
import math
import torch
import torch.multiprocessing
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
class FixedSubnetConv(nn.Conv2d):
    """Conv2d whose effective weight is weight * scores.

    `scores` is a learnable mask the same shape as the weight; calling
    set_subnet() freezes it to a hard 0/1 mask keeping the top
    (1 - prune_rate) fraction of entries by magnitude.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
        nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))

    def set_prune_rate(self, prune_rate):
        # Fraction of score entries to zero out in set_subnet().
        self.prune_rate = prune_rate

    def set_subnet(self):
        # Replace scores with a frozen binary mask: the prune_rate
        # smallest-magnitude entries become 0, the rest become 1.
        mask = self.clamped_scores().clone()
        _, order = self.clamped_scores().flatten().abs().sort()
        cutoff = int(self.prune_rate * self.clamped_scores().numel())
        flat = mask.flatten()
        flat[order[:cutoff]] = 0
        flat[order[cutoff:]] = 1
        self.scores = torch.nn.Parameter(mask)
        self.scores.requires_grad = False

    def clamped_scores(self):
        return self.scores.abs()

    def get_subnet(self):
        # Effective (masked) convolution weight.
        return self.weight * self.scores

    def forward(self, x):
        masked_weight = self.get_subnet()
        return F.conv2d(x, masked_weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
def get_inputs():
    """Produce a sample input batch for smoke-testing the module."""
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    """Return the (args, kwargs) used to construct the module under test."""
    kwargs = dict(in_channels=4, out_channels=4, kernel_size=4)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.multiprocessing
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise product out = in0 * in1 over the 256 elements of the
    # flattened (4, 4, 4, 4) weight and score tensors.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel  # guard the tail block
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add on the (4, 4, 1, 1) conv output:
    # in_out[i] += bias[i % 4] (one bias value per output channel).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # output-channel index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    # Generated entry point for FixedSubnetConvNew.forward:
    # masked-weight convolution followed by a fused bias add.
    # args layout: weight, scores, bias, input.
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # buf0 = weight * scores (the masked subnet weight).
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = buf1
        del buf1
        # Fused in-place per-channel bias add on the conv output.
        triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    # Extra returns are saved tensors for the backward pass.
    return buf2, primals_1, primals_2, primals_4, buf0
class FixedSubnetConvNew(nn.Conv2d):
    """Inductor-compiled variant of FixedSubnetConv.

    Same interface as the eager module, but forward dispatches to the
    generated ``call`` entry point.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One learnable score per weight element.
        self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
        nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))

    def set_prune_rate(self, prune_rate):
        """Record the fraction of score entries to zero in set_subnet()."""
        self.prune_rate = prune_rate
        None  # no-op left over from a removed print/log statement

    def set_subnet(self):
        """Freeze ``scores`` into a non-trainable binary {0, 1} mask."""
        output = self.clamped_scores().clone()
        _, idx = self.clamped_scores().flatten().abs().sort()
        p = int(self.prune_rate * self.clamped_scores().numel())
        flat_oup = output.flatten()
        flat_oup[idx[:p]] = 0
        flat_oup[idx[p:]] = 1
        self.scores = torch.nn.Parameter(output)
        self.scores.requires_grad = False

    def clamped_scores(self):
        """Return the element-wise magnitude of the scores."""
        return self.scores.abs()

    def get_subnet(self):
        """Return the score-masked weight used by the convolution."""
        return self.weight * self.scores

    def forward(self, input_0):
        # Positional numbering matches the layout expected by `call`.
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = self.scores
        primals_4 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
|
RICE-EIC/Robust_Scratch_Ticket
|
FixedSubnetConv
| false
| 8,669
|
[
"MIT"
] | 13
|
f77b41cdaab6db4922a6d4b5970db75a9bfc7257
|
https://github.com/RICE-EIC/Robust_Scratch_Ticket/tree/f77b41cdaab6db4922a6d4b5970db75a9bfc7257
|
ImpalaResidual
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ImpalaResidual(nn.Module):
    """
    A residual block for an IMPALA CNN.

    Pre-activation style: relu -> conv -> relu -> conv, with a skip
    connection adding the input back onto the branch output.
    """

    def __init__(self, depth):
        super().__init__()
        self.conv1 = nn.Conv2d(depth, depth, 3, padding=1)
        self.conv2 = nn.Conv2d(depth, depth, 3, padding=1)

    def forward(self, x):
        branch = self.conv1(F.relu(x))
        branch = self.conv2(F.relu(branch))
        return branch + x
def get_inputs():
    """Produce a sample input batch for smoke-testing the module."""
    sample = torch.rand(4, 1, 64, 64)
    return [sample]
def get_init_inputs():
    """Return the (args, kwargs) used to construct the module under test."""
    kwargs = {'depth': 1}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise out = relu(in).  The grid exactly covers all elements,
    # so only a degenerate always-true mask is materialised.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + relu on the conv1 output; the single
    # bias scalar (in_ptr0[0]) is broadcast across every element.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tl.store(in_out_ptr0 + x0, tmp5, None)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # In-place fused epilogue: conv2 output + broadcast bias + residual x.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + x0, None)
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 + tmp4
    tl.store(in_out_ptr0 + x0, tmp5, None)
def call(args):
    # Generated entry point for ImpalaResidualNew.forward:
    # relu -> conv1 -> (bias+relu) -> conv2 -> (bias + residual add).
    # args layout: input, conv1.weight, conv1.bias, conv2.weight, conv2.bias.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_3, (1,), (1,))
    assert_size_stride(primals_4, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
            torch.float32)
        get_raw_stream(0)
        # buf0 = relu(input)
        triton_poi_fused_relu_0[grid(16384)](primals_1, buf0, 16384, XBLOCK
            =128, num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf2 = buf1
        del buf1
        # In-place: buf2 = relu(buf2 + conv1.bias)
        triton_poi_fused_convolution_relu_1[grid(16384)](buf2, primals_3,
            16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf4 = buf3
        del buf3
        # In-place: buf4 = buf4 + conv2.bias + input (the residual add).
        triton_poi_fused_add_convolution_2[grid(16384)](buf4, primals_5,
            primals_1, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_5
    # Extra returns are saved tensors for the backward pass.
    return buf4, primals_2, primals_4, buf0, buf2
class ImpalaResidualNew(nn.Module):
    """
    A residual block for an IMPALA CNN.

    Inductor-compiled variant: forward dispatches to the generated ``call``
    with the conv weights/biases and the input in the order it expects.
    """

    def __init__(self, depth):
        super().__init__()
        self.conv1 = nn.Conv2d(depth, depth, 3, padding=1)
        self.conv2 = nn.Conv2d(depth, depth, 3, padding=1)

    def forward(self, input_0):
        # Positional numbering matches the layout expected by `call`.
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
|
PacktPublishing/Hands-On-Reinforcement-Learning-for-Games
|
ImpalaResidual
| false
| 8,670
|
[
"MIT"
] | 41
|
045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
|
https://github.com/PacktPublishing/Hands-On-Reinforcement-Learning-for-Games/tree/045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
|
distLinear
|
import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import WeightNorm
class distLinear(nn.Module):
    """Cosine-similarity classifier head.

    Scores are scaled cosine distances between the L2-normalised input and
    the class weight vectors; the weight rows are kept normalised either by
    weight-norm (learnable per-class scale) or by explicit renormalisation.
    """

    def __init__(self, indim, outdim):
        super(distLinear, self).__init__()
        self.L = nn.Linear(indim, outdim, bias=False)
        self.class_wise_learnable_norm = True
        if self.class_wise_learnable_norm:
            WeightNorm.apply(self.L, 'weight', dim=0)
        # Few-way heads use a smaller logit scale.
        self.scale_factor = 2 if outdim <= 200 else 10

    def forward(self, x):
        # L2-normalise the input along dim 1 (epsilon avoids divide-by-zero).
        row_norm = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x)
        x_normalized = x.div(row_norm + 1e-05)
        if not self.class_wise_learnable_norm:
            # Manually renormalise the raw weight rows when weight-norm is
            # not managing them.
            w = self.L.weight.data
            w_norm = torch.norm(w, p=2, dim=1).unsqueeze(1).expand_as(w)
            self.L.weight.data = w.div(w_norm + 1e-05)
        cos_dist = self.L(x_normalized)
        return self.scale_factor * cos_dist
def get_inputs():
    """Produce a sample input batch for smoke-testing the module."""
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    """Return the (args, kwargs) used to construct the module under test."""
    kwargs = dict(indim=4, outdim=4)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils.weight_norm import WeightNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Per-row L2 norm of the (4, 4) weight_v matrix: out[r] = ||v[r, :]||.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # Sum of squares of the four row elements, then sqrt.
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = libdevice.sqrt(tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Weight-norm reconstruction, row-wise: w = v * (g / ||v||).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row (output-unit) index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 / tmp2
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Normalise the (4, 4, 4, 4) input along dim 1:
    # out = x / (||x||_2(dim=1) + 1e-5), gathering the four dim-1 slices
    # that share the same (batch, spatial) position.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16   # spatial position within a channel plane
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-05  # epsilon to avoid division by zero
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place scale by the classifier's scale_factor (2.0 for outdim<=200).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
    # Generated entry point for distLinearNew.forward:
    # rebuild the weight-normed weight, normalise the input, matmul, scale.
    # args layout: input, weight_g, weight_v.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 1), (1, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        get_raw_stream(0)
        # buf0 = per-row norms of weight_v.
        triton_poi_fused__weight_norm_interface_0[grid(4)](primals_3, buf0,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # buf1 = weight = v * g / ||v||.
        triton_poi_fused__weight_norm_interface_1[grid(16)](primals_3,
            primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # buf2 = input normalised along dim 1.
        triton_poi_fused_add_div_2[grid(256)](primals_1, buf2, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_1
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # cosine distances: normalised input (64, 4) @ weight.T.
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf3
        # In-place multiply by the scale factor.
        triton_poi_fused_mul_3[grid(256)](buf4, 256, XBLOCK=128, num_warps=
            4, num_stages=1)
    # Extra returns are saved tensors for the backward pass.
    return buf4, buf1, primals_2, primals_3, buf0, reinterpret_tensor(buf2,
        (64, 4), (4, 1), 0)
class distLinearNew(nn.Module):
    """Inductor-compiled cosine-similarity classifier head.

    Same interface as distLinear; forward dispatches to the generated
    ``call`` with the weight-norm parameters and the input.
    """

    def __init__(self, indim, outdim):
        super(distLinearNew, self).__init__()
        self.L = nn.Linear(indim, outdim, bias=False)
        self.class_wise_learnable_norm = True
        if self.class_wise_learnable_norm:
            WeightNorm.apply(self.L, 'weight', dim=0)
        # Few-way heads use a smaller logit scale.
        if outdim <= 200:
            self.scale_factor = 2
        else:
            self.scale_factor = 10

    def forward(self, input_0):
        # Positional numbering matches the layout expected by `call`.
        primals_2 = self.L.weight_g
        primals_3 = self.L.weight_v
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
RafLaf/easy
|
distLinear
| false
| 8,671
|
[
"MIT"
] | 25
|
3e3603aef7dfb1cf469820330d695b93ba76dfd4
|
https://github.com/RafLaf/easy/tree/3e3603aef7dfb1cf469820330d695b93ba76dfd4
|
SelfAttentionLayer2
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import *
class SelfAttentionLayer2(nn.Module):
    """Self-attention pooling over a set of ``dim``-dimensional vectors.

    Computes scaled dot-product attention between learned query/key
    projections of ``h``, averages the attention rows, and returns the
    attention-weighted sum of ``h`` (a single ``dim`` vector).
    """

    def __init__(self, dim, da):
        # ``da`` is accepted for interface compatibility but unused here.
        super(SelfAttentionLayer2, self).__init__()
        self.dim = dim
        self.Wq = nn.Parameter(torch.zeros(self.dim, self.dim))
        self.Wk = nn.Parameter(torch.zeros(self.dim, self.dim))
        nn.init.xavier_uniform_(self.Wq.data, gain=1.414)
        nn.init.xavier_uniform_(self.Wk.data, gain=1.414)

    def forward(self, h):
        """h: (n, dim) -> (dim,) attention-pooled vector."""
        # Removed a dead ``h.shape[0]`` expression statement.
        assert self.dim == h.shape[1]
        q = torch.matmul(h, self.Wq)
        k = torch.matmul(h, self.Wk)
        # Scaled dot-product scores, softmax-normalised per query row.
        e = torch.matmul(q, k.t()) / math.sqrt(self.dim)
        attention = F.softmax(e, dim=1)
        attention = attention.mean(dim=0)
        return torch.matmul(attention, h)
def get_inputs():
    """Produce a sample input batch for smoke-testing the module."""
    sample = torch.rand(4, 4)
    return [sample]
def get_init_inputs():
    """Return the (args, kwargs) used to construct the module under test."""
    kwargs = dict(dim=4, da=4)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.utils.data import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # First softmax stage over each row of the (4, 4) score matrix:
    # out = exp((x - rowmax) * 0.5).  The 0.5 factor folds in the
    # 1/sqrt(dim) = 1/2 attention scaling; normalisation happens in the
    # next kernel.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    # Subtract the row max for numerical stability before exponentiating.
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Second softmax stage fused with the row mean: each of the four rows
    # of exp-scores is divided by its row sum, then the four rows are
    # averaged into a single length-4 attention vector.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Row 0: element x0 plus the four row entries for the denominator.
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tl.load(in_ptr0 + 1)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    tmp6 = tl.load(in_ptr0 + 2)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp9 = tl.load(in_ptr0 + 3)
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
    # Row 1.
    tmp13 = tl.load(in_ptr0 + (4 + x0), xmask)
    tmp14 = tl.load(in_ptr0 + 4)
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp16 = tl.load(in_ptr0 + 5)
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
    tmp19 = tl.load(in_ptr0 + 6)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
    tmp22 = tl.load(in_ptr0 + 7)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    # Row 2.
    tmp27 = tl.load(in_ptr0 + (8 + x0), xmask)
    tmp28 = tl.load(in_ptr0 + 8)
    tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
    tmp30 = tl.load(in_ptr0 + 9)
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
    tmp33 = tl.load(in_ptr0 + 10)
    tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
    tmp36 = tl.load(in_ptr0 + 11)
    tmp37 = tl.broadcast_to(tmp36, [XBLOCK])
    # Row 3.
    tmp41 = tl.load(in_ptr0 + (12 + x0), xmask)
    tmp42 = tl.load(in_ptr0 + 12)
    tmp43 = tl.broadcast_to(tmp42, [XBLOCK])
    tmp44 = tl.load(in_ptr0 + 13)
    tmp45 = tl.broadcast_to(tmp44, [XBLOCK])
    tmp47 = tl.load(in_ptr0 + 14)
    tmp48 = tl.broadcast_to(tmp47, [XBLOCK])
    tmp50 = tl.load(in_ptr0 + 15)
    tmp51 = tl.broadcast_to(tmp50, [XBLOCK])
    # Normalise each row element by its row sum, accumulating the sum of
    # the four normalised rows as we go.
    tmp5 = tmp2 + tmp4
    tmp8 = tmp5 + tmp7
    tmp11 = tmp8 + tmp10
    tmp12 = tmp0 / tmp11
    tmp18 = tmp15 + tmp17
    tmp21 = tmp18 + tmp20
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 / tmp24
    tmp26 = tmp12 + tmp25
    tmp32 = tmp29 + tmp31
    tmp35 = tmp32 + tmp34
    tmp38 = tmp35 + tmp37
    tmp39 = tmp27 / tmp38
    tmp40 = tmp26 + tmp39
    tmp46 = tmp43 + tmp45
    tmp49 = tmp46 + tmp48
    tmp52 = tmp49 + tmp51
    tmp53 = tmp41 / tmp52
    tmp54 = tmp40 + tmp53
    # Divide the accumulated sum by the number of rows to get the mean.
    tmp55 = 4.0
    tmp56 = tmp54 / tmp55
    tl.store(out_ptr0 + x0, tmp56, xmask)
def call(args):
    # Generated entry point for SelfAttentionLayer2New.forward:
    # q = h @ Wq, k = h @ Wk, softmax(q k^T / sqrt(dim)).mean(0) @ h.
    # args layout: Wq, Wk, h.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # NOTE(review): primals_1 is used as the matmul LHS here and as
        # ``h`` below, so the wrapper passes (Wq, Wk, h) with h last.
        extern_kernels.mm(primals_1, primals_2, out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, primals_3, out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Raw attention scores q @ k^T (scaling is fused into the softmax).
        extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
            out=buf2)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf2, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4,), (1,), torch.float32)
        # Finish softmax and average the rows into one attention vector.
        triton_poi_fused__softmax_mean_1[grid(4)](buf3, buf4, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del buf3
        buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
        # Weighted sum of the rows of h.
        extern_kernels.mm(reinterpret_tensor(buf4, (1, 4), (4, 1), 0),
            primals_1, out=buf5)
        del buf4
    # Extra returns are saved tensors for the backward pass.
    return reinterpret_tensor(buf5, (4,), (1,), 0
        ), buf0, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf1
class SelfAttentionLayer2New(nn.Module):
    """Inductor-compiled self-attention pooling layer.

    Same interface as SelfAttentionLayer2; forward dispatches to the
    generated ``call`` with (Wq, Wk, input).
    """

    def __init__(self, dim, da):
        # ``da`` is accepted for interface compatibility but unused here.
        super(SelfAttentionLayer2New, self).__init__()
        self.dim = dim
        self.Wq = nn.Parameter(torch.zeros(self.dim, self.dim))
        self.Wk = nn.Parameter(torch.zeros(self.dim, self.dim))
        nn.init.xavier_uniform_(self.Wq.data, gain=1.414)
        nn.init.xavier_uniform_(self.Wk.data, gain=1.414)

    def forward(self, input_0):
        # Positional numbering matches the layout expected by `call`.
        primals_1 = self.Wq
        primals_2 = self.Wk
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
RUCAIBox/TG_CRS_Code
|
SelfAttentionLayer2
| false
| 8,672
|
[
"Apache-2.0"
] | 27
|
0428a3a069c4d0d4888f2d476dba2cafd7918524
|
https://github.com/RUCAIBox/TG_CRS_Code/tree/0428a3a069c4d0d4888f2d476dba2cafd7918524
|
NoiseLayer
|
import torch
from torch import nn
import torch.nn
class NoiseLayer(nn.Module):
    """adds noise. noise is per pixel (constant over channels) with per-channel weight"""

    def __init__(self, channels):
        super().__init__()
        # Per-channel noise strength, initialised to zero (no noise).
        self.weight = nn.Parameter(torch.zeros(channels))
        self.noise = None

    def forward(self, x, noise=None):
        if noise is None:
            # Use the cached noise buffer if one was set, otherwise draw
            # fresh per-pixel noise matching the spatial size of x.
            if self.noise is None:
                noise = torch.randn(x.size(0), 1, x.size(2), x.size(3),
                    device=x.device, dtype=x.dtype)
            else:
                noise = self.noise
        return x + self.weight.view(1, -1, 1, 1) * noise
def get_inputs():
    """Produce a sample input batch for smoke-testing the module."""
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    """Return the (args, kwargs) used to construct the module under test."""
    kwargs = {'channels': 4}
    return [[], kwargs]
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # out = x + weight[channel] * noise, where the (N, 1, H, W) noise is
    # broadcast across the channel dimension.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index (selects the weight entry)
    x0 = xindex % 16       # spatial position within a plane
    x2 = xindex // 64      # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp1 * tmp2
    tmp4 = tmp0 + tmp3
    tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
    # Generated entry point for NoiseLayerNew.forward: draw fresh
    # per-pixel noise, then fuse the weighted add into one kernel.
    # args layout: input, weight.
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Fresh (N, 1, H, W) gaussian noise; the self.noise cache path of
        # the eager module is not represented in this compiled graph.
        buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch.
            float32, device=device(type='cuda', index=0), pin_memory=False)
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # buf2 = input + weight[c] * noise.
        triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1,
            buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf2, buf1
class NoiseLayerNew(nn.Module):
    """adds noise. noise is per pixel (constant over channels) with per-channel weight"""

    def __init__(self, channels):
        super().__init__()
        # Per-channel noise strength, initialised to zero (no noise).
        self.weight = nn.Parameter(torch.zeros(channels))
        self.noise = None

    def forward(self, input_0):
        # Dispatch to the compiled graph, which always draws fresh noise.
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
|
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
|
NoiseLayer
| false
| 8,673
|
[
"MIT"
] | 23
|
9078a48ec3474dacdd02693b051e3addef1c5697
|
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
|
CNN
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class CNN(nn.Module):
    """Two conv/max-pool stages followed by a two-layer classifier head.

    Expects single-channel 64x64 inputs (the spatial math fixes the
    flattened feature size at 128 * 13 * 13).
    """

    def __init__(self, num_classes):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, 5)
        self.mp1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(64, 128, 5)
        self.mp2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 13 * 13, 512)
        self.fc2 = nn.Linear(512, num_classes)

    def forward(self, x):
        # Feature extractor: conv -> relu -> 2x2 max-pool, twice.
        features = self.mp1(F.relu(self.conv1(x)))
        features = self.mp2(F.relu(self.conv2(features)))
        # Flatten to (batch, 128*13*13) for the fully-connected head.
        flat = features.view(-1, 128 * 13 * 13)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
def get_inputs():
    """Produce a sample input batch for smoke-testing the module."""
    sample = torch.rand(4, 1, 64, 64)
    return [sample]
def get_init_inputs():
    """Return the (args, kwargs) used to construct the module under test."""
    kwargs = {'num_classes': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Layout transform of the conv2 weight (128, 64, 5, 5) from contiguous
    # to a channels-last-style stride order for the convolution kernel.
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex  # kernel-element index (5*5)
    y3 = yindex
    y0 = yindex % 64   # input-channel index
    y1 = yindex // 64  # output-channel index
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Fused bias-add + relu on the conv1 output, simultaneously rewriting
    # it from contiguous NCHW to a channels-last-style layout.
    ynumel = 256
    xnumel = 3600
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex  # spatial position (60*60)
    y3 = yindex
    y0 = yindex % 64   # channel index (selects the bias entry)
    y1 = yindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + (x2 + 3600 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (y0 + 64 * x2 + 230400 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool over the channels-last 60x60 feature map,
    # emitting both the max value and the 0..3 argmax index (for backward).
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64        # channel index
    x1 = xindex // 64 % 30  # pooled-output column
    x2 = xindex // 1920     # pooled-output row (and batch)
    x3 = xindex
    # The four elements of the 2x2 pooling window.
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7680 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (3840 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (3904 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which window element produced the running max.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused per-channel bias-add + relu on the conv2 output.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128  # channel index (selects the bias entry)
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1,
    ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Second 2x2 stride-2 max pool (26x26 -> 13x13), writing the argmax
    # indices in the channels-last layout and the pooled values back in
    # contiguous NCHW for the following matmul.
    ynumel = 676
    xnumel = 128
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex         # channel index
    y0 = yindex % 13    # pooled-output column
    y1 = yindex // 13   # pooled-output row (and batch)
    y5 = yindex
    y4 = yindex // 169  # batch index
    y6 = yindex % 169   # flat spatial position within one plane
    # The four elements of the 2x2 pooling window.
    tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 6656 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 6656 * y1), xmask &
        ymask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (3328 + x2 + 256 * y0 + 6656 * y1), xmask &
        ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3456 + x2 + 256 * y0 + 6656 * y1), xmask &
        ymask, eviction_policy='evict_last')
    # Running max with matching 0..3 argmax index (for backward).
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1, 1], 1, tl.int8)
    tmp4 = tl.full([1, 1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1, 1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1, 1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask)
    tl.store(out_ptr1 + (y6 + 169 * x2 + 21632 * y4), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + relu on the fc1 output (512 units).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512  # unit index (selects the bias entry)
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
    # Generated entry point for CNNNew.forward:
    # conv1 -> relu -> pool -> conv2 -> relu -> pool -> fc1 -> relu -> fc2.
    # args layout: conv1.w, conv1.b, input, conv2.w, conv2.b,
    #              fc1.w, fc1.b, fc2.w, fc2.b.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (512, 21632), (21632, 1))
    assert_size_stride(primals_7, (512,), (1,))
    assert_size_stride(primals_8, (4, 512), (512, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Re-lay-out conv2's weight for the channels-last pipeline.
        buf0 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(8192, 25)](primals_4, buf0, 8192, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_4
        buf1 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 64, 60, 60), (230400, 3600, 60, 1))
        # buf2 = relu(conv1 + bias), rewritten channels-last.
        buf2 = empty_strided_cuda((4, 64, 60, 60), (230400, 1, 3840, 64),
            torch.float32)
        triton_poi_fused_convolution_relu_1[grid(256, 3600)](buf1,
            primals_2, buf2, 256, 3600, XBLOCK=16, YBLOCK=256, num_warps=8,
            num_stages=1)
        del buf1
        del primals_2
        # First 2x2 max pool: values in buf3, argmax indices in buf4.
        buf3 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
            torch.float32)
        buf4 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_2[grid(230400)](buf2, buf3,
            buf4, 230400, XBLOCK=512, num_warps=8, num_stages=1)
        buf5 = extern_kernels.convolution(buf3, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 128, 26, 26), (86528, 1, 3328, 128))
        buf6 = buf5
        del buf5
        # In-place relu(conv2 + bias).
        triton_poi_fused_convolution_relu_3[grid(346112)](buf6, primals_5,
            346112, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        # Second max pool: indices in buf7, values back in NCHW in buf8.
        buf7 = empty_strided_cuda((4, 128, 13, 13), (21632, 1, 1664, 128),
            torch.int8)
        buf8 = empty_strided_cuda((4, 128, 13, 13), (21632, 169, 13, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_4[grid(676, 128)](buf6,
            buf7, buf8, 676, 128, XBLOCK=128, YBLOCK=8, num_warps=4,
            num_stages=1)
        # fc1: (4, 21632) @ (21632, 512).
        buf9 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf8, (4, 21632), (21632, 1),
            0), reinterpret_tensor(primals_6, (21632, 512), (1, 21632), 0),
            out=buf9)
        buf10 = buf9
        del buf9
        # In-place relu(fc1 + bias).
        triton_poi_fused_relu_5[grid(2048)](buf10, primals_7, 2048, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_7
        # fc2 with fused bias via addmm -> final logits.
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8,
            (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf11)
        del primals_9
    # Extra returns are saved tensors for the backward pass.
    return (buf11, primals_1, primals_3, buf0, buf2, buf3, buf4, buf6, buf7,
        reinterpret_tensor(buf8, (4, 21632), (21632, 1), 0), buf10,
        primals_8, primals_6)
class CNNNew(nn.Module):
    """LeNet-style CNN (1->64->128 conv stack, two FC layers) whose forward
    pass runs the Triton-compiled ``call`` graph instead of eager ops."""

    def __init__(self, num_classes):
        super(CNNNew, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, 5)
        self.mp1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(64, 128, 5)
        self.mp2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 13 * 13, 512)
        self.fc2 = nn.Linear(512, num_classes)

    def forward(self, input_0):
        # Pack weights/biases in the order the compiled graph expects;
        # the input tensor sits at position 3 (primals_3).
        params = [
            self.conv1.weight, self.conv1.bias, input_0,
            self.conv2.weight, self.conv2.bias,
            self.fc1.weight, self.fc1.bias,
            self.fc2.weight, self.fc2.bias,
        ]
        return call(params)[0]
|
Psarpei/Handwritten-Text-Recognition
|
CNN
| false
| 8,674
|
[
"MIT"
] | 15
|
be8f12092e385f3e117ae79b08fb06d0681f67e3
|
https://github.com/Psarpei/Handwritten-Text-Recognition/tree/be8f12092e385f3e117ae79b08fb06d0681f67e3
|
SelfAttentionLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import *
class SelfAttentionLayer(nn.Module):
    """Additive self-attention pooling over a set of vectors.

    Scores each of the n rows of ``h`` with ``b^T tanh(h a)``, softmax-
    normalizes the scores, and returns the attention-weighted sum of rows.

    Args:
        dim: feature dimension of each input row.
        da: hidden size of the attention projection.
        alpha, dropout: stored for API compatibility; not used in forward.
    """

    def __init__(self, dim, da, alpha=0.2, dropout=0.5):
        super(SelfAttentionLayer, self).__init__()
        self.dim = dim
        self.da = da
        self.alpha = alpha
        self.dropout = dropout
        self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)))
        self.b = nn.Parameter(torch.zeros(size=(self.da, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        nn.init.xavier_uniform_(self.b.data, gain=1.414)

    def forward(self, h):
        """Pool the (n, dim) matrix ``h`` into a single (dim,) vector."""
        assert self.dim == h.shape[1]
        # (n, dim) @ (dim, da) -> tanh -> @ (da, 1) -> squeeze -> (n,) scores
        e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze(
            dim=1)
        # Fix: pass dim explicitly. Implicit-dim softmax is deprecated and
        # warns; for this 1-D tensor the implicit choice is dim=0, so the
        # numerical behavior is unchanged.
        attention = F.softmax(e, dim=0)
        return torch.matmul(attention, h)
def get_inputs():
    """Sample forward inputs: one random (4, 4) matrix."""
    return [torch.rand(4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for SelfAttentionLayer."""
    return [[], dict(dim=4, da=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.utils.data import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# In-place elementwise tanh over a 16-element buffer (the 4x4 intermediate
# of SelfAttentionLayer's first matmul).
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tl.store(in_out_ptr0 + x0, tmp1, xmask)
# Softmax over a length-4 score vector in a single program (launched with
# grid(1)): subtract the max for numerical stability, exponentiate,
# normalize by the sum.
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    # row max for the stable-softmax shift
    tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tmp9 = tmp5 / tmp8
    tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
    """Compiled forward graph for SelfAttentionLayerNew.

    args = [primals_1, primals_2, primals_3] as packed by the wrapper:
    the `a` parameter, the input matrix, and the `b` parameter. Performs
    two matmuls, an in-place tanh, a softmax, and a final matmul; returns
    the pooled vector plus intermediates kept for autograd.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 1), (1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # buf0 = primals_1 @ primals_2
        extern_kernels.mm(primals_1, primals_2, out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # in-place tanh on the 4x4 intermediate
        triton_poi_fused_tanh_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # raw attention scores: tanh(...) @ primals_3
        extern_kernels.mm(buf1, primals_3, out=buf2)
        buf5 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_per_fused__softmax_1[grid(1)](buf2, buf5, 1, 4, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
        # final matmul of the softmax weights (viewed as 1x4) with primals_1
        extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0),
            primals_1, out=buf6)
        del buf5
    return reinterpret_tensor(buf6, (4,), (1,), 0
        ), buf1, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
        ), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)
class SelfAttentionLayerNew(nn.Module):
    """Additive self-attention pooling layer whose forward pass is executed
    by the Triton-compiled ``call`` graph. Learns a (dim, da) projection
    ``a`` and a (da, 1) scoring vector ``b``."""

    def __init__(self, dim, da, alpha=0.2, dropout=0.5):
        super(SelfAttentionLayerNew, self).__init__()
        self.dim = dim
        self.da = da
        self.alpha = alpha
        self.dropout = dropout
        self.a = nn.Parameter(torch.zeros(dim, da))
        self.b = nn.Parameter(torch.zeros(da, 1))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        nn.init.xavier_uniform_(self.b.data, gain=1.414)

    def forward(self, input_0):
        # Argument order expected by the compiled graph: a, input, b.
        return call([self.a, input_0, self.b])[0]
|
RUCAIBox/TG_CRS_Code
|
SelfAttentionLayer
| false
| 8,675
|
[
"Apache-2.0"
] | 27
|
0428a3a069c4d0d4888f2d476dba2cafd7918524
|
https://github.com/RUCAIBox/TG_CRS_Code/tree/0428a3a069c4d0d4888f2d476dba2cafd7918524
|
StddevLayer
|
import torch
from torch import nn
import torch.nn
class StddevLayer(nn.Module):
    """Minibatch-stddev layer: appends ``num_new_features`` channels holding
    the per-group average standard deviation of the activations."""

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        # Bug fix: the constructor arguments used to be ignored (hard-coded
        # 4 and 1); honor them so non-default configurations take effect.
        # Defaults are unchanged, so existing callers behave identically.
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, x):
        """Map (b, c, h, w) -> (b, c + num_new_features, h, w)."""
        b, c, h, w = x.shape
        group_size = min(self.group_size, b)
        y = x.reshape([group_size, -1, self.num_new_features, c // self.
            num_new_features, h, w])
        y = y - y.mean(0, keepdim=True)
        y = (y ** 2).mean(0, keepdim=True)
        # epsilon keeps the sqrt differentiable at zero variance
        y = (y + 1e-08) ** 0.5
        y = y.mean([3, 4, 5], keepdim=True).squeeze(3)
        y = y.expand(group_size, -1, -1, h, w).clone().reshape(b, self.
            num_new_features, h, w)
        z = torch.cat([x, y], dim=1)
        return z
def get_inputs():
    """Sample forward inputs: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs (all defaults)."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Single-program reduction for StddevLayer on a (4, 4, 4, 4) input: for each
# of the 64 (c, h, w) positions, compute the mean over the 4 batch elements
# (offsets 0/64/128/192), the mean squared deviation, sqrt(var + 1e-8), then
# average all 64 values into one scalar and write it into the stddev channel
# of the padded output (row stride 80 = 5 channels * 16).
@triton.jit
def triton_per_fused_add_cat_mean_pow_sub_0(in_ptr0, out_ptr1, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    r1 = rindex % 16
    r2 = rindex // 16
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr0 + (64 + r0), None)
    tmp3 = tl.load(in_ptr0 + (128 + r0), None)
    tmp5 = tl.load(in_ptr0 + (192 + r0), None)
    # mean over the group of 4 batch elements
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    # mean squared deviation from the group mean
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-08
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.sqrt(tmp22)
    # average the 64 per-position stddevs into one scalar
    tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
    tmp26 = tl.sum(tmp24, 1)[:, None]
    tmp27 = 64.0
    tmp28 = tmp26 / tmp27
    tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
        tmp28, None)
# Copy half of the torch.cat: writes the original 4 input channels into the
# first 4 channels of the 5-channel output (input row stride 64, output
# row stride 80).
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    x1 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
def call(args):
    """Compiled forward for StddevLayerNew on a (4, 4, 4, 4) input.

    Allocates the (4, 5, 4, 4) output once; the reduction kernel writes the
    stddev feature into the view of channel 4, and the cat kernel copies the
    input into the view of channels 0-3.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
        # alias of the last output channel (element offset 64)
        buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64)
        get_raw_stream(0)
        triton_per_fused_add_cat_mean_pow_sub_0[grid(1)](arg0_1, buf2, 1,
            64, XBLOCK=1, num_warps=2, num_stages=1)
        # alias of the first four output channels
        buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0)
        triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf3,
class StddevLayerNew(nn.Module):
    """Minibatch-stddev layer executed by the Triton-compiled ``call`` graph.

    NOTE(review): the compiled graph asserts a (4, 4, 4, 4) input, so only
    the default group_size=4 / num_new_features=1 configuration is actually
    supported by the kernels.
    """

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        # Bug fix: the constructor arguments used to be ignored (hard-coded
        # 4 and 1); store them so the attributes reflect the configuration.
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
|
StddevLayer
| false
| 8,676
|
[
"MIT"
] | 23
|
9078a48ec3474dacdd02693b051e3addef1c5697
|
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
|
SoftCrossEntropyLoss
|
import torch
from torch import Tensor
from torch.backends import cudnn as cudnn
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from typing import List
class SoftCrossEntropyLoss(nn.Module):
    """Calculate the CrossEntropyLoss with soft targets.

    :param weight: Weight to assign to each of the classes. Default: None
    :type weight: list of float
    :param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'.
        'none': no reduction,
        'mean': the mean of the losses,
        'sum': the sum of the losses.
    :type reduction: str
    """

    def __init__(self, weight: 'List[float]'=None, reduction: 'str'='mean'):
        super().__init__()
        if weight is None:
            self.weight = None
        else:
            self.register_buffer('weight', torch.Tensor(weight))
        self.reduction = reduction

    def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor:
        """Calculate the loss.

        :param input: prediction logits, shape (n, k)
        :param target: target probabilities, shape (n, k)
        :return: loss (scalar for 'mean'/'sum', shape (n,) for 'none')
        """
        # validates the 2-D shape, as the original loop-based version did
        n, k = input.shape
        # Vectorized form of the original per-class loop: one log-softmax
        # instead of k full cross-entropy passes. Identical values, since
        # F.cross_entropy(input, i, reduction='none') == -log_softmax(input)[:, i].
        per_class = -target.float() * F.log_softmax(input, dim=1)
        if self.weight is not None:
            per_class = per_class * self.weight.unsqueeze(0)
        losses = per_class.sum(dim=1)
        if self.reduction == 'mean':
            losses = losses.mean()
        elif self.reduction == 'sum':
            losses = losses.sum()
        elif self.reduction != 'none':
            raise ValueError(f'Unrecognized reduction: {self.reduction}')
        return losses
def get_inputs():
    """Sample forward inputs: random (4, 4) logits and soft targets."""
    return [torch.rand(4, 4), torch.rand(4, 4)]
def get_init_inputs():
    """Constructor args/kwargs (all defaults)."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.backends import cudnn as cudnn
from torch import nn as nn
from torch.nn import init as init
from typing import List
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# For each row of a (4, 4) logits tensor, subtract the row maximum (the
# stabilization step of log-softmax) and write the shifted logits into four
# identical buffers — one per class iteration of the original loss loop.
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
    out_ptr3, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    # running max over the 4 row elements
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)
    tl.store(out_ptr2 + x2, tmp8, xmask)
    tl.store(out_ptr3 + x2, tmp8, xmask)
# Finishes the soft cross-entropy in one program: for each of the 4 rows it
# computes -log_softmax(input)[:, i] for i = 0..3 (log-sum-exp over the
# max-shifted logits in in_ptr1..in_ptr4), multiplies each term by the soft
# target probability from in_ptr0, sums over the 4 classes, then averages
# the 4 per-row losses and stores the scalar into in_out_ptr1.
@triton.jit
def triton_per_fused_add_mean_mul_nll_loss_forward_1(in_out_ptr1, in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp38 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
    tmp41 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp46 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr4 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp57 = tl.load(in_ptr4 + 4 * r0, None, eviction_policy='evict_last')
    tmp59 = tl.load(in_ptr4 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp62 = tl.load(in_ptr4 + (2 + 4 * r0), None, eviction_policy='evict_last')
    # class 0 term: target[:, 0] * -(shifted_logit - log(sum(exp(shifted))))
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp1 - tmp12
    tmp14 = -tmp13
    tmp15 = tl.full([1, 1], True, tl.int1)
    tmp16 = 0.0
    tmp17 = tl.where(tmp15, tmp14, tmp16)
    tmp18 = tmp0 * tmp17
    # class 1 term
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tl_math.exp(tmp20)
    tmp24 = tmp22 + tmp23
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp24 + tmp26
    tmp29 = tl_math.exp(tmp28)
    tmp30 = tmp27 + tmp29
    tmp31 = tl_math.log(tmp30)
    tmp32 = tmp20 - tmp31
    tmp33 = -tmp32
    tmp34 = tl.where(tmp15, tmp33, tmp16)
    tmp35 = tmp19 * tmp34
    tmp36 = tmp18 + tmp35
    # class 2 term
    tmp40 = tl_math.exp(tmp39)
    tmp42 = tl_math.exp(tmp41)
    tmp43 = tmp40 + tmp42
    tmp44 = tl_math.exp(tmp38)
    tmp45 = tmp43 + tmp44
    tmp47 = tl_math.exp(tmp46)
    tmp48 = tmp45 + tmp47
    tmp49 = tl_math.log(tmp48)
    tmp50 = tmp38 - tmp49
    tmp51 = -tmp50
    tmp52 = tl.where(tmp15, tmp51, tmp16)
    tmp53 = tmp37 * tmp52
    tmp54 = tmp36 + tmp53
    # class 3 term
    tmp58 = tl_math.exp(tmp57)
    tmp60 = tl_math.exp(tmp59)
    tmp61 = tmp58 + tmp60
    tmp63 = tl_math.exp(tmp62)
    tmp64 = tmp61 + tmp63
    tmp65 = tl_math.exp(tmp56)
    tmp66 = tmp64 + tmp65
    tmp67 = tl_math.log(tmp66)
    tmp68 = tmp56 - tmp67
    tmp69 = -tmp68
    tmp70 = tl.where(tmp15, tmp69, tmp16)
    tmp71 = tmp55 * tmp70
    tmp72 = tmp54 + tmp71
    # 'mean' reduction over the 4 rows
    tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
    tmp75 = tl.sum(tmp73, 1)[:, None]
    tmp76 = 4.0
    tmp77 = tmp75 / tmp76
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp77, None)
def call(args):
    """Compiled forward for SoftCrossEntropyLossNew (mean reduction).

    args = [logits (4, 4), soft targets (4, 4)]. Kernel 0 produces the
    max-shifted logits (four copies, one per class iteration of the
    original loop); kernel 1 reduces them to the scalar loss.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(16)](arg0_1, buf0, buf1, buf3,
            buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        buf6 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf6
        del buf6
        triton_per_fused_add_mean_mul_nll_loss_forward_1[grid(1)](buf7,
            arg1_1, buf0, buf1, buf3, buf4, 1, 4, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg1_1
        del buf0
        del buf1
        del buf3
        del buf4
    return buf7,
class SoftCrossEntropyLossNew(nn.Module):
    """Cross-entropy with soft targets, evaluated by the fused Triton graph.

    :param weight: per-class weights, or None for an unweighted loss
    :type weight: list of float
    :param reduction: 'none' | 'mean' | 'sum'
    :type reduction: str
    """

    def __init__(self, weight: 'List[float]'=None, reduction: 'str'='mean'):
        super().__init__()
        if weight is not None:
            self.register_buffer('weight', torch.Tensor(weight))
        else:
            self.weight = None
        self.reduction = reduction

    def forward(self, input_0, input_1):
        # logits first, soft targets second — the order the graph asserts
        return call([input_0, input_1])[0]
|
PushparajaMurugan/dauphin
|
SoftCrossEntropyLoss
| false
| 8,677
|
[
"Apache-2.0"
] | 18
|
4d9832c72288282e6b3d03be1b0ad8708282b005
|
https://github.com/PushparajaMurugan/dauphin/tree/4d9832c72288282e6b3d03be1b0ad8708282b005
|
CoralLayer
|
import torch
class CoralLayer(torch.nn.Module):
    """CORAL ordinal-regression output layer.

    Cao, Mirjalili, and Raschka (2020)
    *Rank Consistent Ordinal Regression for Neural Networks
    with Application to Age Estimation*
    Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008

    A single shared linear weight produces one score per example; adding
    num_classes - 1 learnable biases yields the ordinal logits.

    Parameters
    -----------
    size_in : int
        Number of input features for the inputs to the forward method, which
        are expected to have shape=(num_examples, num_features).
    num_classes : int
        Number of classes in the dataset.
    preinit_bias : bool (default=True)
        If true, biases start as descending values in [0, 1] range instead
        of zeros; this pre-initialization gives faster learning and better
        generalization in practice.
    """

    def __init__(self, size_in, num_classes, preinit_bias=True):
        super().__init__()
        self.size_in = size_in
        self.size_out = 1
        self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False)
        if preinit_bias:
            bias_init = torch.arange(num_classes - 1, 0, -1).float()
            bias_init = bias_init / (num_classes - 1)
        else:
            bias_init = torch.zeros(num_classes - 1).float()
        self.coral_bias = torch.nn.Parameter(bias_init)

    def forward(self, x):
        """Map (num_examples, num_features) inputs to ordinal logits of
        shape (num_examples, num_classes - 1)."""
        score = self.coral_weights(x)
        return score + self.coral_bias
def get_inputs():
    """Sample forward inputs: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for CoralLayer."""
    return [[], dict(size_in=4, num_classes=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Broadcast add for CoralLayer: each of the 64 per-example scores (in_ptr0)
# is added to each of the 3 bias values (in_ptr1), producing the 192-element
# (64, 3) logits buffer.
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 3
    x0 = xindex % 3
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
    """Compiled forward for CoralLayerNew.

    args = [weight (1, 4), input (4, 4, 4, 4), bias (3,)]. Flattens the
    input to (64, 4), multiplies by the transposed weight, then broadcast-
    adds the bias; returns the (4, 4, 4, 3) logits plus the flattened input
    kept for backward.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # (64, 4) @ (4, 1) — the weight viewed as its transpose via strides
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(192)](buf0, primals_3, buf1, 192,
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_3
    return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class CoralLayerNew(torch.nn.Module):
    """CORAL ordinal-regression layer executed through the Triton graph.

    Cao, Mirjalili, and Raschka (2020)
    *Rank Consistent Ordinal Regression for Neural Networks
    with Application to Age Estimation*
    Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008

    Parameters
    -----------
    size_in : int
        Number of input features.
    num_classes : int
        Number of classes in the dataset.
    preinit_bias : bool (default=True)
        If true, biases start as descending values in [0, 1] range instead
        of zeros (faster learning and better generalization in practice).
    """

    def __init__(self, size_in, num_classes, preinit_bias=True):
        super().__init__()
        self.size_in = size_in
        self.size_out = 1
        self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False)
        if preinit_bias:
            bias_init = torch.arange(num_classes - 1, 0, -1).float()
            bias_init = bias_init / (num_classes - 1)
        else:
            bias_init = torch.zeros(num_classes - 1).float()
        self.coral_bias = torch.nn.Parameter(bias_init)

    def forward(self, input_0):
        # Argument order expected by the compiled graph: weight, input, bias.
        return call([self.coral_weights.weight, input_0, self.coral_bias])[0]
|
Raschka-research-group/coral-pytorch
|
CoralLayer
| false
| 8,678
|
[
"MIT"
] | 32
|
6b85e287118476095bac85d6f3dabc6ffb89a326
|
https://github.com/Raschka-research-group/coral-pytorch/tree/6b85e287118476095bac85d6f3dabc6ffb89a326
|
AconC
|
import torch
import torch.nn as nn
class AconC(nn.Module):
    """ACON-C activation (activate or not).

    Computes (p1 - p2) * x * sigmoid(beta * (p1 - p2) * x) + p2 * x with
    per-channel learnable p1, p2 and beta, per
    "Activate or Not: Learning Customized Activation"
    <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, x):
        delta = self.p1 - self.p2
        dpx = delta * x
        gate = torch.sigmoid(self.beta * dpx)
        return dpx * gate + self.p2 * x
def get_inputs():
    """Sample forward inputs: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for AconC."""
    return [[], dict(c1=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Elementwise p1 - p2 over the 4 per-channel parameter values.
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
# Fused ACON-C body: dpx = (p1 - p2) * x (in_ptr0 holds the precomputed
# per-channel difference); output = dpx * sigmoid(beta * dpx) + p2 * x.
# x1 indexes the channel of each of the 256 elements.
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp2
    tmp5 = tl.sigmoid(tmp4)
    tmp6 = tmp2 * tmp5
    tmp8 = tmp7 * tmp1
    tmp9 = tmp6 + tmp8
    tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
    """Compiled forward for AconCNew.

    args = [p1 (1,4,1,1), p2 (1,4,1,1), x (4,4,4,4), beta (1,4,1,1)].
    First kernel computes the per-channel p1 - p2; the second fuses the
    dpx * sigmoid(beta * dpx) + p2 * x expression.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sub_0[grid(4)](primals_1, primals_2, buf0, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf0, primals_3,
            primals_4, primals_2, buf1, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
    return buf1, primals_3, primals_4, buf0
class AconCNew(nn.Module):
    """ACON-C activation evaluated by the fused Triton kernels.

    (p1 - p2) * x * sigmoid(beta * (p1 - p2) * x) + p2 * x with per-channel
    learnable p1, p2 and beta; see
    "Activate or Not: Learning Customized Activation"
    <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, input_0):
        # Argument order expected by the compiled graph: p1, p2, x, beta.
        return call([self.p1, self.p2, input_0, self.beta])[0]
|
PoCInnovation/Koic
|
AconC
| false
| 8,679
|
[
"MIT"
] | 13
|
eca53b53b7242c1e83213ef9408366ca0a346358
|
https://github.com/PoCInnovation/Koic/tree/eca53b53b7242c1e83213ef9408366ca0a346358
|
SimpleShortCut
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleShortCut(nn.Module):
    """Parameter-free downsampling shortcut: subsample H and W by stride 2,
    then zero-pad planes // 4 channels on each side of the channel dim."""

    def __init__(self, planes):
        super().__init__()
        self.planes = planes // 4

    def forward(self, x):
        subsampled = x[:, :, ::2, ::2]
        pad_spec = (0, 0, 0, 0, self.planes, self.planes)
        return F.pad(subsampled, pad_spec, 'constant', 0)
def get_inputs():
    """Sample forward inputs: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for SimpleShortCut."""
    return [[], dict(planes=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Fused stride-2 subsample + channel zero-pad for SimpleShortCut on a
# (4, 4, 4, 4) input: output channel index x2 runs over 6 channels (one pad
# channel on each side); positions outside 0 <= x2-1 < 4 get the masked-load
# default 0.0, interior positions read the input at even H/W offsets.
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 96
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 4 % 6
    x0 = xindex % 2
    x3 = xindex // 24
    x5 = xindex // 2 % 12
    x6 = xindex
    tmp0 = -1 + x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (-16 + 2 * x0 + 8 * x5 + 64 * x3), tmp5 &
        xmask, eviction_policy='evict_last', other=0.0)
    tl.store(out_ptr0 + x6, tmp6, xmask)
def call(args):
    """Compiled forward for SimpleShortCutNew on a (4, 4, 4, 4) input.

    Allocates the (4, 6, 2, 2) output and fills it with the fused
    subsample-and-pad kernel.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(96)](arg0_1, buf0, 96,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class SimpleShortCutNew(nn.Module):
    """Parameter-free downsampling shortcut (stride-2 subsample plus channel
    zero-padding), executed by the compiled Triton kernel."""

    def __init__(self, planes):
        super().__init__()
        self.planes = planes // 4

    def forward(self, input_0):
        return call([input_0])[0]
|
RaoefTaki/MNTDP-forked
|
SimpleShortCut
| false
| 8,680
|
[
"MIT"
] | 15
|
d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
|
https://github.com/RaoefTaki/MNTDP-forked/tree/d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
|
DoubleDeltaTransform
|
import torch
import torchaudio
class DoubleDeltaTransform(torch.nn.Module):
    """A transformation to compute delta and double delta features.

    Args:
        win_length (int): The window length to use for computing deltas
            (Default: 5).
        mode (str): Mode parameter passed to padding (Default: replicate).
    """

    def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None:
        super().__init__()
        self.win_length = win_length
        self.mode = mode
        self._delta = torchaudio.transforms.ComputeDeltas(win_length=self.
            win_length, mode=self.mode)

    def forward(self, X):
        """Stack X with its first- and second-order deltas.

        Args:
            X (Tensor): Tensor of audio of dimension (..., freq, time).

        Returns:
            Tensor: specgram, deltas and double deltas of size
            (..., 3*freq, time).
        """
        first_order = self._delta(X)
        second_order = self._delta(first_order)
        return torch.hstack((X, first_order, second_order))
def get_inputs():
    """Sample forward inputs: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs (all defaults)."""
    return [[], dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torchaudio
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Replication padding along the time axis: each length-4 row of the input
# becomes a length-8 row by clamping the source index to [0, 3] (the nested
# max/min is expressed with the arithmetic select idiom inductor emits).
@triton.jit
def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 +
        x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 >
        0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)
# Builds the delta filter: writes the ramp [-2, -1, 0, 1, 2] into each of
# the 64 (64, 1, 5) grouped-convolution filter rows.
@triton.jit
def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x2 = xindex
    tmp0 = -2 + x0
    tmp1 = tmp0.to(tl.float32)
    tl.store(out_ptr0 + x2, tmp1, xmask)
# Same replication padding as kernel 0, fused with a multiply by 0.1
# (applied to the first-order delta before the second convolution).
@triton.jit
def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 +
        x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 >
        0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask,
        eviction_policy='evict_last')
    tmp1 = 0.1
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)
# torch.hstack of the three feature blocks into a (4, 12, 4, 4) output:
# channels 0-3 copy the input (in_ptr0), channels 4-7 take the first-order
# delta (in_ptr1) scaled by 0.1, channels 8-11 take the second-order delta
# (in_ptr2) scaled by 0.1.
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 12
    x0 = xindex % 16
    x2 = xindex // 192
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
        other=0.0)
    tmp11 = 0.1
    tmp12 = tmp10 * tmp11
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp9, tmp12, tmp13)
    tmp15 = tmp0 >= tmp7
    tl.full([1], 12, tl.int64)
    tmp18 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp15 &
        xmask, other=0.0)
    tmp19 = tmp18 * tmp11
    tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
    tmp21 = tl.where(tmp15, tmp19, tmp20)
    tmp22 = tl.where(tmp9, tmp14, tmp21)
    tmp23 = tl.where(tmp4, tmp5, tmp22)
    tl.store(out_ptr0 + x3, tmp23, xmask)
def call(args):
    # Inductor-generated schedule for the DoubleDelta transform:
    # pad -> depthwise conv with [-2..2] taps (delta) -> pad -> conv again
    # (double delta) -> concat [x, 0.1*delta, 0.1*double_delta] on channels.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32)
        triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256,
            num_warps=4, num_stages=1)
        # groups=64: one independent 5-tap filter per flattened channel.
        buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=
            (0,), dilation=(1,), transposed=False, output_padding=(0,),
            groups=64, bias=None)
        assert_size_stride(buf2, (1, 64, 4), (256, 4, 1))
        buf3 = buf0  # reuse the first padding buffer for the second pass
        del buf0
        triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = buf1  # reuse the filter buffer; kernel rewrites the same taps
        del buf1
        triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding=
            (0,), dilation=(1,), transposed=False, output_padding=(0,),
            groups=64, bias=None)
        assert_size_stride(buf5, (1, 64, 4), (256, 4, 1))
        del buf3
        del buf4
        buf6 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
            )
        triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del buf2
        del buf5
    return buf6,
class DoubleDeltaTransformNew(torch.nn.Module):
    """A transformation to compute delta and double delta features.

    Args:
        win_length (int): The window length to use for computing deltas (Default: 5).
        mode (str): Mode parameter passed to padding (Default: replicate).
    """

    def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None:
        super().__init__()
        self.win_length = win_length
        self.mode = mode
        # NOTE(review): depends on a module-level `torchaudio` import outside
        # this chunk; self._delta is never used by forward(), which runs the
        # generated Triton schedule instead — verify against the original.
        self._delta = torchaudio.transforms.ComputeDeltas(win_length=self.
            win_length, mode=self.mode)

    def forward(self, input_0):
        # Delegates to the Inductor-compiled call(); returns the concatenated
        # [input, delta, double-delta] tensor (channels tripled).
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
RUB-SysSec/WaveFake
|
DoubleDeltaTransform
| false
| 8,681
|
[
"MIT"
] | 20
|
d52d51b9ccdb0cec3f484e84b228791f06b955be
|
https://github.com/RUB-SysSec/WaveFake/tree/d52d51b9ccdb0cec3f484e84b228791f06b955be
|
Conv2d
|
import torch
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
def get_causal_padding(kernel_size, strides, dilation_rate, n_dims=2):
    """Return F.pad-style values (last dim first) for causal padding.

    All padding is placed before the signal (left/top) so no output
    position sees future inputs.
    """
    pads = []
    for dim in reversed(range(n_dims)):
        if strides[dim] > 1 and dilation_rate[dim] > 1:
            raise ValueError("can't have the stride and dilation over 1")
        amount = (kernel_size[dim] - strides[dim]) * dilation_rate[dim]
        pads.extend((amount, 0))
    return pads
def get_same_padding(kernel_size, strides, dilation_rate, n_dims=2):
    """Return F.pad-style values (last dim first) for 'same' padding.

    Odd totals put the extra element on the left/top side.
    """
    pads = []
    for dim in reversed(range(n_dims)):
        if strides[dim] > 1 and dilation_rate[dim] > 1:
            raise ValueError("Can't have the stride and dilation rate over 1")
        total = (kernel_size[dim] - strides[dim]) * dilation_rate[dim]
        if total % 2 == 0:
            pads += (total // 2, total // 2)
        else:
            pads += (int(np.ceil(total / 2)), int(np.floor(total / 2)))
    return tuple(pads)
def get_valid_padding(n_dims=2):
    """Return an all-zero padding tuple of length 2*n_dims ('valid' mode)."""
    return tuple(0 for _ in range(2 * n_dims))
class Conv2d(nn.Conv2d):
    """nn.Conv2d with Keras-style string padding: 'same', 'valid' or 'causal'.

    Padding is applied manually via F.pad in forward(); the underlying
    nn.Conv2d is constructed with padding=0.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding='same', dilation=1, *args, **kwargs):
        # Normalise scalar hyper-parameters to 2-tuples.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size,) * 2
        if isinstance(stride, int):
            stride = (stride,) * 2
        if isinstance(dilation, int):
            dilation = (dilation,) * 2
        self.stride = stride  # overwritten by nn.Conv2d.__init__ below
        self.padding_str = padding.upper()  # assumes padding is a str — TODO confirm
        if self.padding_str == 'SAME':
            self.pad_values = get_same_padding(kernel_size, stride, dilation)
        elif self.padding_str == 'VALID':
            self.pad_values = get_valid_padding()
        elif self.padding_str == 'CAUSAL':
            self.pad_values = get_causal_padding(kernel_size, stride, dilation)
        else:
            raise ValueError
        # Skip the F.pad call entirely when every pad amount is zero.
        self.condition = np.sum(self.pad_values) != 0
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
            stride, 0, dilation, *args, **kwargs)

    def reset_parameters(self) ->None:
        # Xavier weights / zero bias instead of the PyTorch default init.
        init.xavier_uniform_(self.weight)
        if self.bias is not None:
            init.zeros_(self.bias)

    def forward(self, x):
        if self.condition:
            x = F.pad(x, self.pad_values)
        x = super(Conv2d, self).forward(x)
        return x
def get_inputs():
    # Random activations matching the harness's expected shape.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # Positional args and ctor kwargs for the module under test.
    kwargs = dict(in_channels=4, out_channels=4, kernel_size=4)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Zero-pad each 4x4 spatial plane to 7x7 (2 left/top, 1 right/bottom —
    # the 'same' padding for a 4x4 kernel).
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 7 % 7  # output row
    x0 = xindex % 7       # output column
    x2 = xindex // 49     # (batch, channel) plane
    x4 = xindex
    tmp0 = -2 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = -2 + x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7  # true only inside the original 4x4 region
    tmp11 = tl.load(in_ptr0 + (-10 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
        other=0.0)
    tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place per-channel bias add after the external convolution.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index selecting the bias element
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
    # Inductor schedule: zero-pad primals_1 to 7x7, convolve with primals_2
    # (no bias), then add primals_3 per channel in place.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    # Extra returns (weight, padded input) are kept for the backward pass.
    return buf2, primals_2, buf0
def get_causal_padding(kernel_size, strides, dilation_rate, n_dims=2):
    """Compute left-only ('causal') padding in F.pad order (last dim first)."""
    result = []
    for axis in range(n_dims - 1, -1, -1):
        if strides[axis] > 1 and dilation_rate[axis] > 1:
            raise ValueError("can't have the stride and dilation over 1")
        # All of the padding goes before the signal; none after.
        result += [(kernel_size[axis] - strides[axis]) * dilation_rate[axis], 0]
    return result
def get_same_padding(kernel_size, strides, dilation_rate, n_dims=2):
    """Compute 'same' padding in F.pad order; extra element goes left/top."""
    result = []
    for axis in range(n_dims - 1, -1, -1):
        if strides[axis] > 1 and dilation_rate[axis] > 1:
            raise ValueError("Can't have the stride and dilation rate over 1")
        total = (kernel_size[axis] - strides[axis]) * dilation_rate[axis]
        half, rem = divmod(total, 2)
        # Odd totals: ceil on the left side, floor on the right.
        result += [half + rem, half]
    return tuple(result)
def get_valid_padding(n_dims=2):
    """No padding at all: a flat tuple of 2*n_dims zeros."""
    return (0, 0) * n_dims
class Conv2dNew(nn.Conv2d):
    """Triton-backed variant of Conv2d with Keras-style string padding.

    forward() delegates to the Inductor-generated call() instead of F.pad +
    nn.Conv2d.forward.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding='same', dilation=1, *args, **kwargs):
        # Normalise scalar hyper-parameters to 2-tuples.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size,) * 2
        if isinstance(stride, int):
            stride = (stride,) * 2
        if isinstance(dilation, int):
            dilation = (dilation,) * 2
        self.stride = stride  # overwritten by nn.Conv2d.__init__ below
        self.padding_str = padding.upper()  # assumes padding is a str — TODO confirm
        if self.padding_str == 'SAME':
            self.pad_values = get_same_padding(kernel_size, stride, dilation)
        elif self.padding_str == 'VALID':
            self.pad_values = get_valid_padding()
        elif self.padding_str == 'CAUSAL':
            self.pad_values = get_causal_padding(kernel_size, stride, dilation)
        else:
            raise ValueError
        self.condition = np.sum(self.pad_values) != 0
        super(Conv2dNew, self).__init__(in_channels, out_channels,
            kernel_size, stride, 0, dilation, *args, **kwargs)

    def reset_parameters(self) ->None:
        # Xavier weights / zero bias instead of the PyTorch default init.
        init.xavier_uniform_(self.weight)
        if self.bias is not None:
            init.zeros_(self.bias)

    def forward(self, input_0):
        # NOTE(review): the weight is passed as primals_1, which call()
        # zero-pads and treats as the convolution input while primals_2
        # (the actual input) is used as the filter — looks swapped relative
        # to the names; verify against the eager Conv2d implementation.
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
Rayhane-mamah/Efficient-VDVAE
|
Conv2d
| false
| 8,682
|
[
"MIT"
] | 41
|
07bcb8ba58c228ab0ed62c5cf374c19a10932010
|
https://github.com/Rayhane-mamah/Efficient-VDVAE/tree/07bcb8ba58c228ab0ed62c5cf374c19a10932010
|
MyLinear
|
import torch
from torch import nn
import torch.nn
import torch.nn.functional as F
class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
        False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5  # He-initialisation std
        if use_wscale:
            # Runtime weight scaling: init at 1/lrmul, scale by he_std*lrmul.
            init_std, self.w_mul = 1.0 / lrmul, he_std * lrmul
        else:
            # Classic init: fold he_std into the stored weights.
            init_std, self.w_mul = he_std / lrmul, lrmul
        self.weight = torch.nn.Parameter(init_std * torch.randn(output_size,
            input_size))
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        # Apply the multipliers at call time so gradients see them too.
        scaled_bias = None if self.bias is None else self.bias * self.b_mul
        return F.linear(x, self.w_mul * self.weight, scaled_bias)
def get_inputs():
    # One random 4-D activation for the harness.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # Ctor args for MyLinear: no positionals, two keyword sizes.
    return [[], dict(input_size=4, output_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Scale the 4x4 weight by its multiplier — baked in as 1.0 for this trace.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Same as mul_0 but for the length-4 bias (multiplier also 1.0).
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
    # Inductor schedule for MyLinear: scale weight and bias by their
    # multipliers (both 1.0 here), then one fused addmm.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Flatten the (4,4,4,4) input to (64,4): out = bias + x @ W^T.
        extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (
            4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
            beta=1, out=buf2)
        del buf0
        del buf1
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class MyLinearNew(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
        False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5  # He-initialisation std
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size,
            input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, input_0):
        # Delegates to the Inductor-compiled call(); note the generated
        # schedule hard-codes multipliers of 1.0 (use_wscale=False, lrmul=1).
        primals_2 = self.weight
        primals_1 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
|
MyLinear
| false
| 8,683
|
[
"MIT"
] | 23
|
9078a48ec3474dacdd02693b051e3addef1c5697
|
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
|
SPoC
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SPoC(nn.Module):
    """Sum-pooling of convolutions: global average pool over both spatial dims."""

    def __init__(self):
        super(SPoC, self).__init__()

    def forward(self, x):
        # Pool with a window equal to the full spatial extent -> 1x1 output.
        height, width = x.size(-2), x.size(-1)
        return F.avg_pool2d(x, (height, width))

    def __repr__(self):
        return self.__class__.__name__ + '()'
def get_inputs():
    # One random 4-D activation for the harness.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # SPoC takes no constructor arguments.
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Global 4x4 average pool: load all 16 elements of one spatial plane,
    # sum them, and multiply by 1/16.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex  # (batch, channel) plane index
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Running sum of the 16 values.
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625  # 1 / 16
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + x0, tmp32, xmask)
def call(args):
    # Inductor schedule for SPoC: a single global average-pool kernel
    # mapping (4,4,4,4) to (4,4,1,1).
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,
class SPoCNew(nn.Module):
    """Triton-backed SPoC: global average pool via the generated call()."""

    def __init__(self):
        super(SPoCNew, self).__init__()

    def __repr__(self):
        return self.__class__.__name__ + '()'

    def forward(self, input_0):
        # Returns the (batch, channels, 1, 1) pooled tensor.
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
RetrainIt/Perfect-Half-Million-Beauty-Product-Image-Recognition-Challenge
|
SPoC
| false
| 8,684
|
[
"Apache-2.0"
] | 15
|
080aa5ae2f2755c6dc10b7cdc910ec0f76bc82c3
|
https://github.com/RetrainIt/Perfect-Half-Million-Beauty-Product-Image-Recognition-Challenge/tree/080aa5ae2f2755c6dc10b7cdc910ec0f76bc82c3
|
Deconv2d
|
import torch
import torch.nn as nn
class Deconv2d(nn.Module):
    """ConvTranspose2d followed by optional BatchNorm, optional Dropout,
    and a configurable activation ('leakyrelu', 'relu' or 'tanh')."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn
        =False, activation='leakyrelu', dropout=False):
        super(Deconv2d, self).__init__()
        # 'same'-style padding for odd kernels.
        padding = (kernel_size - 1) // 2
        self.conv = nn.ConvTranspose2d(in_channels, out_channels,
            kernel_size, stride, padding=padding)
        self.bn = None
        if bn:
            self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
                affine=True)
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        builders = {
            'leakyrelu': lambda: nn.LeakyReLU(negative_slope=0.2),
            'relu': nn.ReLU,
            'tanh': nn.Tanh,
        }
        if activation not in builders:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))
        self.activation = builders[activation]()

    def forward(self, x):
        out = self.conv(x)
        # Optional normalisation and regularisation, in that order.
        for layer in (self.bn, self.dropout):
            if layer is not None:
                out = layer(out)
        return self.activation(out)
def get_inputs():
    # One random 4-D activation for the harness.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # Ctor kwargs for Deconv2d.
    return [[], dict(in_channels=4, out_channels=4, kernel_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused per-channel bias add + LeakyReLU(0.2). Stores both the boolean
    # positivity mask (out_ptr0, for backward) and the activated value.
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 4  # channel index selecting the bias element
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
    # Inductor schedule for Deconv2d: transposed convolution (no bias) via
    # the external kernel, then fused bias add + LeakyReLU(0.2).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
        buf1 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(400)](buf0,
            primals_2, buf1, buf2, 400, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_2
    # buf1 (positivity mask) is kept for the LeakyReLU backward.
    return buf2, primals_1, primals_3, buf1
class Deconv2dNew(nn.Module):
    """Triton-backed Deconv2d: ConvTranspose2d + LeakyReLU via call().

    NOTE(review): the generated call() fuses only the leakyrelu path; the
    bn/dropout/relu/tanh branches configured here are not exercised by it.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn
        =False, activation='leakyrelu', dropout=False):
        super(Deconv2dNew, self).__init__()
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.ConvTranspose2d(in_channels, out_channels,
            kernel_size, stride, padding=padding)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
            affine=True) if bn else None
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        if activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(negative_slope=0.2)
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
RQuispeC/pytorch-ACSCP
|
Deconv2d
| false
| 8,685
|
[
"MIT"
] | 25
|
c83f08632012c2245250ff9c5140814461db575c
|
https://github.com/RQuispeC/pytorch-ACSCP/tree/c83f08632012c2245250ff9c5140814461db575c
|
ConstMult
|
import torch
import torch.nn as nn
class ConstMult(nn.Module):
    """Multiplies its input elementwise by a single learnable scalar."""

    def __init__(self, alpha=1.0):
        super().__init__()
        # One-element parameter, initialised to the given constant.
        self.alpha = nn.Parameter(torch.Tensor(1))
        nn.init.constant_(self.alpha, alpha)

    def forward(self, x):
        return x * self.alpha
def get_inputs():
    # One random 4-D activation for the harness.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # ConstMult takes no required constructor arguments.
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Broadcast the single scalar at in_ptr0[0] over the whole input tensor.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)  # the learnable alpha scalar
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
    # Inductor schedule for ConstMult: a single broadcast-multiply kernel.
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
    # The input is returned as well for use in the backward pass.
    return buf0, primals_2
class ConstMultNew(nn.Module):
    """Triton-backed ConstMult: learnable scalar multiply via call()."""

    def __init__(self, alpha=1.0):
        super().__init__()
        # One-element parameter initialised to alpha.
        self.alpha = nn.Parameter(torch.Tensor(1))
        nn.init.constant_(self.alpha, alpha)

    def forward(self, input_0):
        primals_1 = self.alpha
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
|
RaoefTaki/MNTDP-forked
|
ConstMult
| false
| 8,686
|
[
"MIT"
] | 15
|
d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
|
https://github.com/RaoefTaki/MNTDP-forked/tree/d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
|
ncm_output
|
import torch
import torch.nn as nn
class ncm_output(nn.Module):
    """Nearest-class-mean style head.

    Scores each sample as minus the squared Euclidean distance to each
    column of the linear weight, minus the linear bias.
    """

    def __init__(self, indim, outdim):
        super(ncm_output, self).__init__()
        self.linear = nn.Linear(indim, outdim)

    def forward(self, x):
        # (B, 1, D) vs (1, K, D) -> pairwise differences via broadcasting.
        samples = x.reshape(x.shape[0], 1, -1)
        centers = self.linear.weight.transpose(0, 1).reshape(1, -1, x.shape[1])
        distances = torch.norm(samples - centers, dim=2)
        return -1 * distances.pow(2) - self.linear.bias
def get_inputs():
    # One random (batch, feature) matrix for the harness.
    return [torch.rand([4, 4])]
def get_init_inputs():
    # Ctor kwargs for ncm_output.
    return [[], dict(indim=4, outdim=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_linalg_vector_norm_mul_pow_sub_0(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused ncm_output forward for 4-D features: per (sample, class) pair,
    # accumulate the squared differences over the 4 feature dims, then
    # compute -dist^2 - bias. (sqrt followed by squaring mirrors the eager
    # norm(...).pow(2) expression.)
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4  # sample index
    x0 = xindex % 4   # class index
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')  # bias
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tmp20 = tmp19 * tmp19
    tmp21 = -1.0
    tmp22 = tmp20 * tmp21
    tmp24 = tmp22 - tmp23
    tl.store(out_ptr0 + x2, tmp24, xmask)
def call(args):
    # Inductor schedule for ncm_output: one fused distance/bias kernel.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_linalg_vector_norm_mul_pow_sub_0[grid(16)](primals_1,
            primals_2, primals_3, buf0, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del primals_3
    return buf0, primals_1, primals_2
class ncm_outputNew(nn.Module):
    """Triton-backed ncm_output: -dist^2 - bias scores via call()."""

    def __init__(self, indim, outdim):
        super(ncm_outputNew, self).__init__()
        self.linear = nn.Linear(indim, outdim)

    def forward(self, input_0):
        primals_1 = self.linear.weight
        primals_3 = self.linear.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
RafLaf/easy
|
ncm_output
| false
| 8,687
|
[
"MIT"
] | 25
|
3e3603aef7dfb1cf469820330d695b93ba76dfd4
|
https://github.com/RafLaf/easy/tree/3e3603aef7dfb1cf469820330d695b93ba76dfd4
|
ValueFunction
|
import torch
import numpy as np
import torch.nn as nn
class ValueFunction(nn.Module):
    """Two-layer tanh MLP mapping a state vector to a scalar value estimate."""

    def __init__(self, width, n_states):
        super(ValueFunction, self).__init__()
        # Hidden layer: weights ~ N(0, 1/sqrt(n_states)), zero bias.
        self.linear1 = nn.Linear(n_states, width)
        nn.init.normal_(self.linear1.weight, 0.0, 1 / np.sqrt(n_states))
        torch.nn.init.constant_(self.linear1.bias, 0.0)
        # Output layer: weights ~ N(0, 1/sqrt(width)), zero bias.
        self.linear2 = nn.Linear(width, 1)
        nn.init.normal_(self.linear2.weight, 0.0, 1 / np.sqrt(width))
        torch.nn.init.constant_(self.linear2.bias, 0.0)

    def forward(self, x):
        hidden = torch.tanh(self.linear1(x))
        return self.linear2(hidden)
def get_inputs():
    # One random 4-D activation for the harness.
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    # Ctor kwargs for ValueFunction.
    return [[], dict(width=4, n_states=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias add + tanh over the hidden activations.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # hidden-unit index selecting the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
    # Inductor schedule for ValueFunction: mm -> fused bias+tanh -> addmm.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Flatten the input to (64, 4) and apply linear1's weight.
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # linear2: out = bias + hidden @ W2^T.
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_5
    return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4
class ValueFunctionNew(nn.Module):
    """Triton-backed ValueFunction: tanh MLP forward via call()."""

    def __init__(self, width, n_states):
        super(ValueFunctionNew, self).__init__()
        # Hidden layer: N(0, 1/sqrt(n_states)) weights, zero bias.
        self.linear1 = nn.Linear(n_states, width)
        nn.init.normal_(self.linear1.weight, 0.0, 1 / np.sqrt(n_states))
        torch.nn.init.constant_(self.linear1.bias, 0.0)
        # Output layer: N(0, 1/sqrt(width)) weights, zero bias.
        self.linear2 = nn.Linear(width, 1)
        nn.init.normal_(self.linear2.weight, 0.0, 1 / np.sqrt(width))
        torch.nn.init.constant_(self.linear2.bias, 0.0)

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
|
RajGhugare19/VE-principle-for-model-based-RL
|
ValueFunction
| false
| 8,688
|
[
"MIT"
] | 16
|
a9f94dfc9317a0ccc60bc7c558dcec1ebc6d0c63
|
https://github.com/RajGhugare19/VE-principle-for-model-based-RL/tree/a9f94dfc9317a0ccc60bc7c558dcec1ebc6d0c63
|
DotProductAttention
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class BaseAttention(nn.Module):
    """Abstract attention base class; subclasses must implement forward()."""

    def __init__(self):
        super().__init__()

    def forward(self, *args, **kwargs):
        raise NotImplementedError
class DotProductAttention(BaseAttention):
    """Dot Product Attention"""

    def __init__(self, dropout_rate=0.0, **kwargs):
        """Initialize DotProductAttention

        Args:
            dropout_rate (float): attention dropout_rate rate
        """
        super().__init__()
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, q, k, v, attn_mask=None):
        """Compute scaled-free dot-product attention.

        Args:
            q (torch.Tensor): Query matrix, (B, T_q, D_q)
            k (torch.Tensor): Key matrix, (B, T_k, D_k)
            v (torch.Tensor): Value matrix, (B, T_v, D_v) T_v = T_k, D_v = D_k
            attn_mask (torch.BoolTensor | None): Mask tensor. True element will be masked.

        Returns:
            output (B, T_q, D_v); attention (B, T_q, T_k)
        """
        scores = q.bmm(k.permute(0, 2, 1))
        if attn_mask is not None:
            # Masked positions get -inf so softmax assigns them zero weight.
            scores.masked_fill_(attn_mask, -np.inf)
        weights = self.dropout(F.softmax(scores, dim=-1))
        return weights.bmm(v), weights
def get_inputs():
    # q, k, v batches for the attention harness.
    return [torch.rand([4, 4, 4]) for _ in range(3)]
def get_init_inputs():
    # DotProductAttention takes no required constructor arguments.
    return [list(), dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 1: exp(x - rowmax) for numerical stability (rows of 4).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 2: divide each exp value by its row sum (rows of 4).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    # Inductor-compiled dot-product attention graph:
    #   scores = args[1] @ args[0]^T  (transpose done via a strided view)
    #   P      = softmax(scores, last dim)  (two-pass kernels)
    #   out    = P @ args[2]
    # Returns (out, P). Inputs must be contiguous (4, 4, 4) float32 CUDA
    # tensors on device 0.
    arg0_1, arg1_1, arg2_1 = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # bmm against a transposed view: buf0 = arg1_1 @ arg0_1^T.
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
            16, 1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Softmax pass 1: exp(x - rowmax).
        triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf2 = buf0  # reuse the score buffer for the normalized weights
        del buf0
        # Softmax pass 2: divide by row sum.
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1  # reuse the numerator buffer for the output
        del buf1
        # out = attention @ v.
        extern_kernels.bmm(buf2, arg2_1, out=buf3)
        del arg2_1
    return buf3, buf2
class BaseAttention(nn.Module):
    """Abstract parent for attention modules.

    Subclasses must override :meth:`forward`; calling it on the base class
    raises ``NotImplementedError``.
    """

    def __init__(self):
        super().__init__()

    def forward(self, *args, **kwargs):
        raise NotImplementedError
class DotProductAttentionNew(BaseAttention):
    """Dot-product attention backed by the inductor-compiled `call` graph."""

    def __init__(self, dropout_rate=0.0, **kwargs):
        """Initialize the attention module.

        Args:
            dropout_rate (float): attention dropout rate (unused by the
                compiled graph at inference; kept for interface parity).
        """
        super().__init__()
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_0, input_1, input_2):
        # Delegate to the compiled graph, which returns (output, attention).
        out, attn = call([input_0, input_1, input_2])
        return out, attn
|
ROBINADC/BiGRU-CRF-with-Attention-for-NER
|
DotProductAttention
| false
| 8,689
|
[
"MIT"
] | 27
|
b9e037ebd6e1d56500ffb60c6030013982c17ded
|
https://github.com/ROBINADC/BiGRU-CRF-with-Attention-for-NER/tree/b9e037ebd6e1d56500ffb60c6030013982c17ded
|
Block
|
import torch
import torch.nn as nn
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden/output widths default to the input width when not given.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # Apply the pipeline fc1 -> act -> drop -> fc2 -> drop in order.
        for layer in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = layer(x)
        return x
class Attention(nn.Module):
    """Multi-head self-attention with a fused qkv projection."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim) unless qk_scale overrides it.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads
        # Project once to packed qkv, then split into per-head q, k, v.
        qkv = self.qkv(x).reshape(batch, tokens, 3, self.num_heads, head_dim)
        q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
        scores = q * self.scale @ k.transpose(-2, -1)
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """Transformer encoder block: pre-norm multi-head attention and a
    pre-norm MLP, each wrapped in a residual connection (with optional
    stochastic depth via drop_path).
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
        .GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        # NOTE(review): DropPath is not defined in this file's visible scope —
        # presumably imported elsewhere; this branch is only reached when
        # drop_path > 0, so the default configuration works regardless.
        self.drop_path = DropPath(drop_path
            ) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)  # MLP expansion width
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
            qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

    def forward(self, x):
        # Residual attention path, then residual MLP path, both pre-normalized.
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
def get_inputs():
    """Return a single random (4, 4, 4) example input."""
    return [torch.rand((4, 4, 4))]
def get_init_inputs():
    """Return (args, kwargs) for constructing the block: dim=4, num_heads=4."""
    kwargs = {'dim': 4, 'num_heads': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics pass: for each of the 16 rows of length 4, emit
    # the row mean (out_ptr0) and rsqrt(variance + 1e-5) (out_ptr1).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7  # row mean over the 4 features
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7  # biased variance
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm application pass: out = (x - mean) * rstd * weight + bias.
    # in_ptr1 holds per-row means, in_ptr2 per-row rsqrt values (from the
    # statistics kernel); in_ptr3/in_ptr4 are the per-feature affine params.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    x0 = xindex % 4  # feature index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_mul_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    # Gather the q slice (offset 0 within the packed stride-12 qkv buffer)
    # into a per-head contiguous layout, applying the attention scale
    # (constant-folded to 1.0 here).
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = 1.0  # attention scale, folded in by the compiler
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Gather the k slice (offset 4 within the packed stride-12 qkv buffer)
    # into a contiguous layout for the score bmm.
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax numerator pass over rows of length 4: out = exp(x - rowmax(x)).
    # 256 elements = 4x4 heads*batch x 4x4 score matrix.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Reload all 4 row elements to reduce the row maximum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7  # subtract row max for numerical stability
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax normalization pass: out = numerator / rowsum(numerator).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Reload all 4 row elements to reduce the row sum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Gather the v slice (offset 8 within the packed stride-12 qkv buffer)
    # into a contiguous layout for the weighted-sum bmm.
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Transpose-copy of the per-head attention output (swaps the 4-strided
    # and unit-strided axes) so heads can be merged back into the channel dim.
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused residual + LayerNorm statistics pass. For each of the 16 rows,
    # it forms r_i = in_ptr0[i] + (in_ptr1[i] + in_ptr2_bias) over the 4
    # features, then stores the row mean (out_ptr0) and the raw biased
    # variance (out_ptr1) — the rsqrt is applied in the next kernel.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)  # per-feature bias, feature 0
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    tmp4 = tmp1 + tmp3
    tmp5 = tmp0 + tmp4  # residual element 0
    tmp10 = tmp7 + tmp9
    tmp11 = tmp6 + tmp10  # residual element 1
    tmp12 = tmp5 + tmp11
    tmp17 = tmp14 + tmp16
    tmp18 = tmp13 + tmp17  # residual element 2
    tmp19 = tmp12 + tmp18
    tmp24 = tmp21 + tmp23
    tmp25 = tmp20 + tmp24  # residual element 3
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27  # row mean
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27  # biased variance (no eps / rsqrt yet)
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Fused residual + LayerNorm application pass. Recomputes the residual
    # r = in_ptr0 + (in_ptr1 + bias), then normalizes with the row mean
    # (in_ptr3) and variance (in_ptr4) produced by the statistics kernel,
    # and applies the per-feature affine weight (in_ptr5) / bias (in_ptr6).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3  # residual value
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Exact (erf-based) GELU: out = 0.5 * x * (1 + erf(x / sqrt(2))).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476  # 1/sqrt(2)
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, XBLOCK: tl.constexpr):
    # Final fused residual, in place in in_out_ptr0:
    # out = (in_ptr0 + (in_ptr1 + in_ptr2_bias)) + (in_out_ptr0 + in_ptr3_bias)
    # i.e. both residual branches plus their per-feature biases in one pass.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index for the biases
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_out_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 + tmp7
    tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
    # Inductor-compiled forward for one transformer Block (dim=4, heads=4,
    # seq=4, batch=4). Pipeline: LayerNorm1 -> fused qkv projection ->
    # attention (softmax) -> output projection -> residual + LayerNorm2 ->
    # MLP (fc1 + GELU + fc2) -> final fused residual.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (12, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (16, 4), (4, 1))
    assert_size_stride(primals_10, (16,), (1,))
    assert_size_stride(primals_11, (4, 16), (16, 1))
    assert_size_stride(primals_12, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # LayerNorm 1: statistics, then normalize with weight/bias.
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
            buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_1
        del primals_2
        # Fused qkv projection (no bias): (16, 4) @ (4, 12) -> (16, 12).
        buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
        # Slice q (scaled) and k out of the packed qkv buffer.
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK
            =4, YBLOCK=16, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
        triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        # Attention scores q @ k^T, then the two-pass softmax.
        buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf6
        triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        # Slice v, apply attention weights, merge heads back to (16, 4).
        buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        del buf3
        buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        # Output projection (its bias is folded into the fused add kernels).
        buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
        del buf10
        extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
        # Residual add + LayerNorm 2 statistics, then normalize.
        buf13 = buf1
        del buf1
        buf14 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
            primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
            primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf13
        del buf14
        del primals_8
        # MLP: fc1 (+bias) -> GELU -> fc2; the second residual is fused below.
        buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
        del buf7
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf16)
        del primals_10
        buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
        buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
        del buf18
        triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
            primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_12
    # buf19 is the block output; NOTE(review): the trailing tensors look like
    # values saved for the backward pass by the autograd wrapper — confirm.
    return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
        (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
        ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
        ), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
        ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
        1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
class Mlp(nn.Module):
    """Feed-forward block: fc1 -> activation -> dropout -> fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Widths fall back to in_features when not provided.
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # First projection with nonlinearity and dropout.
        hidden = self.drop(self.act(self.fc1(x)))
        # Second projection with dropout.
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention using one fused qkv linear projection."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
        attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # qk_scale overrides the default 1/sqrt(head_dim) scaling.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads
        # One projection produces packed q, k, v; split into heads.
        packed = self.qkv(x).reshape(batch, tokens, 3, self.num_heads,
            head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = packed.unbind(0)
        scores = q * self.scale @ k.transpose(-2, -1)
        weights = self.attn_drop(scores.softmax(dim=-1))
        merged = (weights @ v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(merged))
class BlockNew(nn.Module):
    """Transformer block whose forward runs the inductor-compiled `call`
    graph instead of eager submodule calls. Construction mirrors `Block`
    so the parameters exist to feed into the graph.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
        .GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        # NOTE(review): DropPath is not defined in this file's visible scope;
        # only reached when drop_path > 0.
        self.drop_path = DropPath(drop_path
            ) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
            qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

    def forward(self, input_0):
        # Map module parameters onto the positional primals_* slots consumed
        # by the compiled graph.
        # NOTE(review): the names here look shuffled relative to how `call`
        # uses the slots — e.g. primals_6 is added as a bias to the attention
        # projection output inside `call`, yet is assigned norm2.weight here.
        # Verify the mapping against the traced graph.
        primals_1 = self.norm1.weight
        primals_2 = self.norm1.bias
        primals_6 = self.norm2.weight
        primals_7 = self.norm2.bias
        primals_9 = self.mlp.fc1.weight
        primals_10 = self.mlp.fc1.bias
        primals_11 = self.mlp.fc2.weight
        primals_8 = self.mlp.fc2.bias
        primals_4 = self.attn.qkv.weight
        primals_5 = self.attn.proj.weight
        primals_12 = self.attn.proj.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        return output[0]
|
Pang-Yatian/Point-MAE
|
Block
| false
| 8,690
|
[
"MIT"
] | 42
|
61727f76e9d0c28babf422505073bd43c2f517bc
|
https://github.com/Pang-Yatian/Point-MAE/tree/61727f76e9d0c28babf422505073bd43c2f517bc
|
ContextAttentionLayer
|
import torch
from collections import OrderedDict
import torch.nn as nn
class Squeeze(nn.Module):
    """Module wrapper so tensor squeezing can sit inside nn.Sequential."""

    def forward(self, data):
        # Remove every singleton dimension from the input.
        return data.squeeze()
class Temperature(nn.Module):
    """Module wrapper dividing its input by a fixed temperature, for use
    inside nn.Sequential (e.g. before a softmax)."""

    def __init__(self, temperature):
        super().__init__()
        self.temperature = temperature

    def forward(self, data):
        # Elementwise division by the stored temperature.
        return data.div(self.temperature)
class ContextAttentionLayer(nn.Module):
    """
    Implements context attention as in the PaccMann paper (Figure 2C) in
    Molecular Pharmaceutics.
    With the additional option of having a hidden size in the context.
    NOTE:
    In tensorflow, weights were initialized from N(0,0.1). Instead, pytorch
    uses U(-stddev, stddev) where stddev=1./math.sqrt(weight.size(1)).
    """

    def __init__(self, reference_hidden_size: 'int',
        reference_sequence_length: 'int', context_hidden_size: 'int',
        context_sequence_length: 'int'=1, attention_size: 'int'=16,
        individual_nonlinearity: 'nn.Module'=None, temperature: 'float'=1.0):
        """Constructor
        Arguments:
            reference_hidden_size (int): Hidden size of the reference input
                over which the attention will be computed (H).
            reference_sequence_length (int): Sequence length of the reference
                (T).
            context_hidden_size (int): This is either simply the amount of
                features used as context (G) or, if the context is a sequence
                itself, the hidden size of each time point.
            context_sequence_length (int): Hidden size in the context, useful
                if context is also textual data, i.e. coming from nn.Embedding.
                Defaults to 1.
            attention_size (int): Hyperparameter of the attention layer,
                defaults to 16.
            individual_nonlinearity (nn.Module): Optional nonlinearity applied
                to each projection. Defaults to None, which is treated as a
                fresh nn.Sequential(), i.e. no nonlinearity. Otherwise it
                expects a torch.nn activation function, e.g. nn.ReLU().
            temperature (float): Temperature parameter to smooth or sharpen the
                softmax. Defaults to 1. Temperature > 1 flattens the
                distribution, temperature below 1 makes it spikier.
        """
        super().__init__()
        # Fix: the default used to be a single nn.Sequential() instance
        # created once at class-definition time and shared across every
        # ContextAttentionLayer instance (mutable default argument). Use a
        # None sentinel and build a fresh no-op per instance instead.
        if individual_nonlinearity is None:
            individual_nonlinearity = nn.Sequential()
        self.reference_sequence_length = reference_sequence_length
        self.reference_hidden_size = reference_hidden_size
        self.context_sequence_length = context_sequence_length
        self.context_hidden_size = context_hidden_size
        self.attention_size = attention_size
        self.individual_nonlinearity = individual_nonlinearity
        self.temperature = temperature
        # NOTE(review): the same nonlinearity module object is embedded in
        # all projections below; harmless for stateless activations, but it
        # is registered under several parents.
        self.reference_projection = nn.Sequential(OrderedDict([(
            'projection', nn.Linear(reference_hidden_size, attention_size)),
            ('act_fn', individual_nonlinearity)]))
        self.context_projection = nn.Sequential(OrderedDict([('projection',
            nn.Linear(context_hidden_size, attention_size)), ('act_fn',
            individual_nonlinearity)]))
        if context_sequence_length > 1:
            # Align the context sequence length with the reference's so the
            # two projections can be summed elementwise.
            self.context_hidden_projection = nn.Sequential(OrderedDict([(
                'projection', nn.Linear(context_sequence_length,
                reference_sequence_length)), ('act_fn',
                individual_nonlinearity)]))
        else:
            self.context_hidden_projection = nn.Sequential()
        self.alpha_projection = nn.Sequential(OrderedDict([('projection',
            nn.Linear(attention_size, 1, bias=False)), ('squeeze', Squeeze(
            )), ('temperature', Temperature(self.temperature)), ('softmax',
            nn.Softmax(dim=1))]))

    def forward(self, reference: 'torch.Tensor', context: 'torch.Tensor'):
        """
        Forward pass through a context attention layer
        Arguments:
            reference (torch.Tensor): This is the reference input on which
                attention is computed.
                Shape: batch_size x ref_seq_length x ref_hidden_size
            context (torch.Tensor): This is the context used for attention.
                Shape: batch_size x context_seq_length x context_hidden_size
        Returns:
            (output, attention_weights): A tuple of two Tensors, first one
                containing the reference filtered by attention (shape:
                batch_size x ref_hidden_size) and the second one the
                attention weights (batch_size x ref_seq_length).
        NOTE(review): Squeeze() drops *all* singleton dimensions, so a
        batch_size of 1 would also collapse the batch axis — verify upstream
        callers never pass batch_size == 1.
        """
        assert len(reference.shape) == 3, 'Reference tensor needs to be 3D'
        assert len(context.shape) == 3, 'Context tensor needs to be 3D'
        reference_attention = self.reference_projection(reference)
        # Project the context, then (optionally) map its sequence length onto
        # the reference's so the sum below broadcasts elementwise.
        context_attention = self.context_hidden_projection(self.
            context_projection(context).permute(0, 2, 1)).permute(0, 2, 1)
        alphas = self.alpha_projection(torch.tanh(reference_attention +
            context_attention))
        # Attention-weighted sum over the reference sequence dimension.
        output = torch.sum(reference * torch.unsqueeze(alphas, -1), 1)
        return output, alphas
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'reference_hidden_size': 4, 'reference_sequence_length': 4,
'context_hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from collections import OrderedDict
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    # In-place fusion of the two attention projections:
    # out = tanh((reference_proj + bias0) + (context_proj + bias1)).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16  # bias/feature index (attention_size = 16)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = libdevice.tanh(tmp6)
    tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax numerator pass over rows of length 4. The repeated multiplies
    # by 1.0 are the Temperature division (temperature = 1.0) constant-folded
    # into the kernel by the compiler.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = 1.0  # 1/temperature, folded in
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13  # subtract row max for numerical stability
    tmp15 = tmp14 * tmp1
    tmp16 = tl_math.exp(tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax normalization pass: out = numerator / rowsum(numerator).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Reload all 4 row elements to reduce the row sum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Attention-weighted sum over the (length-4, fully unrolled) sequence
    # dimension: out[b, h] = sum_t in_ptr0[b, t, h] * in_ptr1[b, t].
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4  # hidden index h
    x1 = xindex // 4  # batch index b
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
    # Inductor-compiled context attention graph. Projects the reference
    # (primals_1) and context (primals_2) to attention_size=16, applies
    # tanh(sum of projections), scores each position via primals_7, runs a
    # two-pass softmax over the 4 sequence positions, then takes the
    # attention-weighted sum of the reference.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (16, 4), (4, 1))
    assert_size_stride(primals_4, (16,), (1,))
    assert_size_stride(primals_5, (16, 4), (4, 1))
    assert_size_stride(primals_6, (16,), (1,))
    assert_size_stride(primals_7, (1, 16), (16, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Reference projection (16, 4) @ (4, 16); bias fused into the tanh
        # kernel below.
        buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0)
        del primals_3
        # Context projection, same shape.
        buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
        del primals_5
        buf2 = reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0)
        del buf0
        get_raw_stream(0)
        # tanh((ref_proj + bias) + (ctx_proj + bias)), in place in buf2.
        triton_poi_fused_add_tanh_0[grid(256)](buf2, primals_4, buf1,
            primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_4
        del primals_6
        # Alpha projection to one score per (batch, position).
        buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_7, (16, 1), (1, 16), 0), out=buf3)
        # Two-pass softmax over the 4 sequence positions.
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
        del buf3
        triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        # output[b, h] = sum_t reference[b, t, h] * alphas[b, t].
        triton_poi_fused_mul_sum_3[grid(16)](primals_1, buf5, buf6, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
    return buf6, buf5, primals_1, reinterpret_tensor(primals_2, (16, 4), (4,
        1), 0), buf2, buf5, primals_7
class Squeeze(nn.Module):
    """nn.Sequential-compatible wrapper around tensor squeezing."""

    def forward(self, data):
        # Strip every size-1 dimension from the input tensor.
        return data.squeeze()
class Temperature(nn.Module):
    """Divides its input by a fixed temperature; usable inside ``nn.Sequential``."""

    def __init__(self, temperature):
        super().__init__()
        # Scalar divisor applied elementwise in forward().
        self.temperature = temperature

    def forward(self, data):
        scaled = data / self.temperature
        return scaled
class ContextAttentionLayerNew(nn.Module):
    """
    Implements context attention as in the PaccMann paper (Figure 2C) in
    Molecular Pharmaceutics.
    With the additional option of having a hidden size in the context.
    NOTE:
    In tensorflow, weights were initialized from N(0,0.1). Instead, pytorch
    uses U(-stddev, stddev) where stddev=1./math.sqrt(weight.size(1)).

    This variant keeps the original constructor but dispatches forward()
    to the Inductor-compiled `call` graph defined in this file.
    """

    def __init__(self, reference_hidden_size: 'int',
        reference_sequence_length: 'int', context_hidden_size: 'int',
        context_sequence_length: 'int'=1, attention_size: 'int'=16,
        individual_nonlinearity: 'type'=nn.Sequential(), temperature:
        'float'=1.0):
        """Constructor
        Arguments:
            reference_hidden_size (int): Hidden size of the reference input
                over which the attention will be computed (H).
            reference_sequence_length (int): Sequence length of the reference
                (T).
            context_hidden_size (int): This is either simply the amount of
                features used as context (G) or, if the context is a sequence
                itself, the hidden size of each time point.
            context_sequence_length (int): Hidden size in the context, useful
                if context is also textual data, i.e. coming from nn.Embedding.
                Defaults to 1.
            attention_size (int): Hyperparameter of the attention layer,
                defaults to 16.
            individual_nonlinearities (type): This is an optional
                nonlinearity applied to each projection. Defaults to
                nn.Sequential(), i.e. no nonlinearity. Otherwise it expects a
                torch.nn activation function, e.g. nn.ReLU().
            temperature (float): Temperature parameter to smooth or sharpen the
                softmax. Defaults to 1. Temperature > 1 flattens the
                distribution, temperature below 1 makes it spikier.
        """
        super().__init__()
        self.reference_sequence_length = reference_sequence_length
        self.reference_hidden_size = reference_hidden_size
        self.context_sequence_length = context_sequence_length
        self.context_hidden_size = context_hidden_size
        self.attention_size = attention_size
        self.individual_nonlinearity = individual_nonlinearity
        self.temperature = temperature
        # Projects the reference and the context into a shared attention space.
        self.reference_projection = nn.Sequential(OrderedDict([(
            'projection', nn.Linear(reference_hidden_size, attention_size)),
            ('act_fn', individual_nonlinearity)]))
        self.context_projection = nn.Sequential(OrderedDict([('projection',
            nn.Linear(context_hidden_size, attention_size)), ('act_fn',
            individual_nonlinearity)]))
        # Only needed when the context is itself a sequence longer than 1.
        if context_sequence_length > 1:
            self.context_hidden_projection = nn.Sequential(OrderedDict([(
                'projection', nn.Linear(context_sequence_length,
                reference_sequence_length)), ('act_fn',
                individual_nonlinearity)]))
        else:
            self.context_hidden_projection = nn.Sequential()
        # Maps attention features to a softmax-normalized scalar weight per step.
        self.alpha_projection = nn.Sequential(OrderedDict([('projection',
            nn.Linear(attention_size, 1, bias=False)), ('squeeze', Squeeze(
            )), ('temperature', Temperature(self.temperature)), ('softmax',
            nn.Softmax(dim=1))]))

    def forward(self, input_0, input_1):
        # Collect parameters/inputs in the order the compiled graph expects.
        primals_3 = self.reference_projection.projection.weight
        primals_4 = self.reference_projection.projection.bias
        primals_5 = self.context_projection.projection.weight
        primals_6 = self.context_projection.projection.bias
        primals_7 = self.alpha_projection.projection.weight
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        # call() returns (attended, alphas, *saved); expose the first two.
        return output[0], output[1]
|
PaccMann/paccmann_predictor
|
ContextAttentionLayer
| false
| 8,691
|
[
"MIT"
] | 19
|
58071311310c45c1efabb34a4003b96a1c58901a
|
https://github.com/PaccMann/paccmann_predictor/tree/58071311310c45c1efabb34a4003b96a1c58901a
|
StyleMod
|
import torch
from torch import nn
import torch.nn
import torch.nn.functional as F
class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
        False, lrmul=1, bias=True):
        super().__init__()
        # He-style standard deviation for the weight initialisation.
        he_std = gain * input_size ** -0.5
        if use_wscale:
            # Equalized LR: store unit-std weights, rescale at forward time.
            init_std, self.w_mul = 1.0 / lrmul, he_std * lrmul
        else:
            # Classic init: bake the He scale into the stored weights.
            init_std, self.w_mul = he_std / lrmul, lrmul
        self.weight = torch.nn.Parameter(init_std * torch.randn(output_size,
            input_size))
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        # Apply the runtime multipliers before the affine transform.
        scaled_bias = None if self.bias is None else self.bias * self.b_mul
        return F.linear(x, self.w_mul * self.weight, scaled_bias)
class StyleMod(nn.Module):
    """Applies a learned per-channel affine (scale, shift) computed from a latent."""

    def __init__(self, latent_size, channels, use_wscale):
        super(StyleMod, self).__init__()
        # Projects the latent to 2*channels values: a scale and a shift per channel.
        self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale
            =use_wscale)

    def forward(self, x, latent):
        raw = self.lin(latent)
        # Reshape to (batch, 2, C, 1, ..., 1) so it broadcasts over spatial dims.
        bcast_shape = [-1, 2, x.size(1)] + [1] * (x.dim() - 2)
        style = raw.view(bcast_shape)
        scale = style[:, 0] + 1.0
        shift = style[:, 1]
        return x * scale + shift
def get_inputs():
    """Sample (x, latent) tensors for exercising StyleMod."""
    x = torch.rand([64, 4, 4, 4])
    latent = torch.rand([4, 4, 4, 4])
    return [x, latent]
def get_init_inputs():
    """Constructor (args, kwargs) for StyleMod."""
    kwargs = {'latent_size': 4, 'channels': 4, 'use_wscale': 1.0}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Elementwise scale of the (8, 4) linear weight by w_mul.

    For this trace (gain=1, input_size=4, use_wscale, lrmul=1) w_mul is
    4**-0.5 = 0.5, constant-folded into the kernel.
    """
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Fused style modulation: out = x * (scale + 1) + shift.

    in_ptr0: x; in_ptr1: projected style (8 values per sample = scale[0:4],
    shift[4:8]); in_ptr2: linear bias (b_mul folded in as 1.0).
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 4
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2), None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (4 + x1 + 8 * x2), None, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last')
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp1 + tmp4
    # scale + 1.0, then modulate x.
    tmp6 = tmp5 + tmp3
    tmp7 = tmp0 * tmp6
    tmp10 = tmp9 * tmp3
    tmp11 = tmp8 + tmp10
    # Add the per-channel shift.
    tmp12 = tmp7 + tmp11
    tl.store(out_ptr0 + x3, tmp12, None)
def call(args):
    """Inductor-generated StyleMod forward.

    args: [lin.bias, lin.weight, latent, x]. Scales the weight by w_mul,
    projects the latent, then applies the fused affine modulation to x.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (8,), (1,))
    assert_size_stride(primals_2, (8, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # weight * w_mul (0.5, baked into the kernel).
        buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(32)](primals_2, buf0, 32, XBLOCK=32,
            num_warps=1, num_stages=1)
        del primals_2
        # latent (flattened to (64, 4)) @ scaled_weight^T.
        buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1)
        del buf0
        # Fused x * (scale + 1) + shift; the bias is added inside the kernel.
        buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_1[grid(4096)](primals_4, buf1, primals_1,
            buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1)
        del buf1
        del primals_1
    return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
        False, lrmul=1, bias=True):
        super().__init__()
        # He-style standard deviation for the weight initialisation.
        he_std = gain * input_size ** -0.5
        if use_wscale:
            # Equalized LR: store unit-std weights, rescale at forward time.
            init_std, self.w_mul = 1.0 / lrmul, he_std * lrmul
        else:
            # Classic init: bake the He scale into the stored weights.
            init_std, self.w_mul = he_std / lrmul, lrmul
        self.weight = torch.nn.Parameter(init_std * torch.randn(output_size,
            input_size))
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        # Apply the runtime multipliers before the affine transform.
        scaled_bias = None if self.bias is None else self.bias * self.b_mul
        return F.linear(x, self.w_mul * self.weight, scaled_bias)
class StyleModNew(nn.Module):
    """StyleMod variant whose forward runs the Inductor-compiled `call` graph."""

    def __init__(self, latent_size, channels, use_wscale):
        super(StyleModNew, self).__init__()
        # Projects the latent to 2*channels values (scale and shift per channel).
        self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale
            =use_wscale)

    def forward(self, input_0, input_1):
        # Gather parameters/inputs in the order `call` expects:
        # [bias, weight, latent, x].
        primals_2 = self.lin.weight
        primals_1 = self.lin.bias
        primals_4 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        # call() returns (result, *saved_for_backward); expose only the result.
        return output[0]
|
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
|
StyleMod
| false
| 8,692
|
[
"MIT"
] | 23
|
9078a48ec3474dacdd02693b051e3addef1c5697
|
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
|
DC
|
import torch
from torch import nn
import torch.nn.functional
class DC(nn.Module):
    """Multi-class soft Dice loss: returns 1 - mean Dice over batch and classes."""

    def __init__(self, nb_classes):
        super(DC, self).__init__()
        # Softmax over the channel (class) dimension.
        self.softmax = nn.Softmax(1)
        self.nb_classes = nb_classes

    @staticmethod
    def onehot(gt, shape):
        """Scatter integer labels `gt` (class indices on dim 1) into a
        one-hot tensor of `shape`.

        Fixed: the buffer is now allocated on gt's device (the original
        always allocated on CPU, which crashes for CUDA labels) and a
        no-op self-assignment was removed.
        """
        gt = gt.long()
        y_onehot = torch.zeros(shape, device=gt.device)
        y_onehot.scatter_(1, gt, 1)
        return y_onehot

    def reshape(self, output, target):
        """Return channel-last permutations of output/target, one-hot
        encoding target first when its shape differs from output's.

        (Dead statements from the original — a bare shape access and a
        bare `None` expression — were removed.)
        """
        if not all(i == j for i, j in zip(output.shape, target.shape)):
            target = self.onehot(target, output.shape)
        target = target.permute(0, 2, 3, 4, 1)
        output = output.permute(0, 2, 3, 4, 1)
        return output, target

    def dice(self, output, target):
        """Soft Dice: softmax(output) vs target, summed over spatial axes."""
        output = self.softmax(output)
        if not all(i == j for i, j in zip(output.shape, target.shape)):
            target = self.onehot(target, output.shape)
        # Reduce over every axis after (batch, class).
        sum_axis = list(range(2, len(target.shape)))
        s = 1e-19  # numerical guard for empty classes
        intersect = torch.sum(output * target, sum_axis)
        dice = 2 * intersect / (torch.sum(output, sum_axis) + torch.sum(
            target, sum_axis) + s)
        return 1.0 - dice.mean()

    def forward(self, output, target):
        """Compute the Dice loss for logits `output` against `target`."""
        result = self.dice(output, target)
        return result
def get_inputs():
    """Sample (logits, target) tensors for exercising DC."""
    logits = torch.rand([4, 4, 4, 4])
    target = torch.rand([4, 4, 4, 4])
    return [logits, target]
def get_init_inputs():
    """Constructor (args, kwargs) for DC."""
    return [[], {'nb_classes': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.functional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Softmax numerator over the class dim: exp(x - max_over_classes(x)).

    The four loads at strides of 16 walk the 4 classes of one (batch,
    spatial) position; normalization happens in the next kernel.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    # Running max over the 4 class values (numerical stability).
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr1,
    out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Per (batch, class) Dice reductions over the 16 spatial elements.

    Normalizes the softmax (exp values / class sum), then writes
    sum(prob * target), sum(prob) and sum(target) for the final kernel.
    """
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp2 = tl.load(in_ptr0 + (16 + r2 + 64 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp4 = tl.load(in_ptr0 + (32 + r2 + 64 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (48 + r2 + 64 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp9 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
    # Softmax denominator: sum of the 4 exp values along the class dim.
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    # intersect = sum(prob * target) over spatial elements.
    tmp10 = tmp8 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]
    # sum(prob) and sum(target) for the Dice denominator.
    tmp15 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp21 = tl.where(xmask, tmp19, 0)
    tmp22 = tl.sum(tmp21, 1)[:, None]
    tl.store(out_ptr1 + x3, tmp14, xmask)
    tl.store(out_ptr2 + x3, tmp18, xmask)
    tl.store(out_ptr3 + x3, tmp22, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Final reduction: loss = 1 - mean(2*intersect / (sum_p + sum_t + eps)).

    in_ptr0/1/2 hold the 16 per-(batch, class) intersect / sum(prob) /
    sum(target) values from the previous kernel; writes the scalar loss.
    """
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp4 = tl.load(in_ptr2 + r0, None)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = 1e-19
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 / tmp7
    # Mean over the 16 (batch, class) Dice values.
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]
    tmp12 = 16.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1.0
    tmp15 = tmp14 - tmp13
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
    """Inductor-generated DC (Dice loss) forward.

    args: [logits, target], both (4, 4, 4, 4). Returns a 0-dim loss tensor.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Softmax numerators (exp of max-shifted logits).
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del arg0_1
        # Per-(batch, class) reductions: intersect, sum(prob), sum(target).
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_per_fused__softmax_mul_sum_1[grid(16)](buf0, arg1_1, buf2,
            buf3, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del arg1_1
        del buf0
        # Scalar loss: 1 - mean Dice.
        buf5 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf5
        del buf5
        triton_per_fused_add_div_mean_mul_rsub_2[grid(1)](buf6, buf2, buf3,
            buf4, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf2
        del buf3
        del buf4
    return buf6,
class DCNew(nn.Module):
    """Dice loss whose forward pass runs the Inductor-compiled `call` graph.

    The eager helper methods (onehot/reshape/dice) are kept for API
    compatibility with DC.
    """

    def __init__(self, nb_classes):
        super(DCNew, self).__init__()
        # Softmax over the channel (class) dimension.
        self.softmax = nn.Softmax(1)
        self.nb_classes = nb_classes

    @staticmethod
    def onehot(gt, shape):
        """One-hot encode integer labels `gt` into a tensor of `shape`.

        Fixed: buffer allocated on gt's device (original always used CPU,
        crashing for CUDA labels) and a no-op self-assignment removed.
        """
        gt = gt.long()
        y_onehot = torch.zeros(shape, device=gt.device)
        y_onehot.scatter_(1, gt, 1)
        return y_onehot

    def reshape(self, output, target):
        """Channel-last views of output/target, one-hot encoding target
        when its shape differs from output's. (Dead statements removed.)"""
        if not all(i == j for i, j in zip(output.shape, target.shape)):
            target = self.onehot(target, output.shape)
        target = target.permute(0, 2, 3, 4, 1)
        output = output.permute(0, 2, 3, 4, 1)
        return output, target

    def dice(self, output, target):
        """Reference (eager) soft Dice loss: 1 - mean Dice over batch/classes."""
        output = self.softmax(output)
        if not all(i == j for i, j in zip(output.shape, target.shape)):
            target = self.onehot(target, output.shape)
        sum_axis = list(range(2, len(target.shape)))
        s = 1e-19  # numerical guard for empty classes
        intersect = torch.sum(output * target, sum_axis)
        dice = 2 * intersect / (torch.sum(output, sum_axis) + torch.sum(
            target, sum_axis) + s)
        return 1.0 - dice.mean()

    def forward(self, input_0, input_1):
        """Dispatch to the compiled graph; returns the scalar loss."""
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
ReubenDo/InExtremIS
|
DC
| false
| 8,693
|
[
"MIT"
] | 17
|
1512ddf9b8c11c4d9f0ebd465d904ef3d539d350
|
https://github.com/ReubenDo/InExtremIS/tree/1512ddf9b8c11c4d9f0ebd465d904ef3d539d350
|
ExponentialUpdate
|
import torch
from torch import Tensor
from torch import nn
from torch.jit import Final
class ExponentialUpdate(nn.Module):
    """Exponential moving-average step: (1 - alpha) * x + alpha * state."""
    # Fixed: the attribute holds a float (see __init__), so the TorchScript
    # Final annotation must be Final[float], not Final[int].
    alpha: 'Final[float]'

    def __init__(self, alpha: 'float'):
        super().__init__()
        # Coerce so TorchScript sees a stable float attribute.
        self.alpha = float(alpha)

    def forward(self, x: 'Tensor', state: 'Tensor') ->Tensor:
        """Blend the new sample `x` with the running `state`."""
        return x * (1 - self.alpha) + state * self.alpha
def get_inputs():
    """Sample (x, state) tensors for exercising ExponentialUpdate."""
    sample = torch.rand([4, 4, 4, 4])
    state = torch.rand([4, 4, 4, 4])
    return [sample, state]
def get_init_inputs():
    """Constructor (args, kwargs) for ExponentialUpdate."""
    return [[], {'alpha': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.jit import Final
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    """Fused EMA step: out = x * (1 - alpha) + state * alpha.

    Traced with alpha=4, so (1 - alpha) = -3.0 and alpha = 4.0 are
    constant-folded into the kernel.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = -3.0
    tmp2 = tmp0 * tmp1
    tmp4 = 4.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
    """Inductor-generated ExponentialUpdate forward for two (4,4,4,4) inputs."""
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Single fused elementwise kernel over all 256 elements.
        triton_poi_fused_add_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,
class ExponentialUpdateNew(nn.Module):
    """ExponentialUpdate whose forward runs the compiled `call` kernel."""
    # NOTE(review): attribute holds a float; annotation corrected from
    # Final[int] to match __init__.
    alpha: 'Final[float]'

    def __init__(self, alpha: 'float'):
        super().__init__()
        self.alpha = float(alpha)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        # call() returns a one-element tuple.
        return output[0]
|
Rikorose/clc-dns-challenge-2020
|
ExponentialUpdate
| false
| 8,694
|
[
"Apache-2.0"
] | 12
|
4f1c078691327a75b3a338fe372ba356b450a6da
|
https://github.com/Rikorose/clc-dns-challenge-2020/tree/4f1c078691327a75b3a338fe372ba356b450a6da
|
Network
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Network(nn.Module):
    """Two-layer MLP mapping a state vector to Q-values (30 hidden units)."""

    def __init__(self, input_size, number_of_actions):
        super(Network, self).__init__()
        self.input_size = input_size
        self.number_of_actions = number_of_actions
        # state -> hidden -> Q-values
        self.full_connection1 = nn.Linear(input_size, 30)
        self.full_connection2 = nn.Linear(30, number_of_actions)

    def forward(self, state):
        hidden = self.full_connection1(state)
        activated = F.relu(hidden)
        return self.full_connection2(activated)
def get_inputs():
    """Sample state tensor for exercising Network."""
    state = torch.rand([4, 4, 4, 4])
    return [state]
def get_init_inputs():
    """Constructor (args, kwargs) for Network."""
    return [[], {'input_size': 4, 'number_of_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Fused bias-add + ReLU (in place) plus the (x <= 0) mask for backward."""
    xnumel = 1920
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 30
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ReLU via max(0, x).
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor-generated Network forward.

    args: [fc1.weight, fc1.bias, state, fc2.weight, fc2.bias].
    Returns the Q-values plus tensors saved for backward.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (30, 4), (4, 1))
    assert_size_stride(primals_2, (30,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 30), (30, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # First layer matmul on the flattened (64, 4) state.
        buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
        get_raw_stream(0)
        # Fused bias + ReLU, also recording the <=0 mask for backward.
        triton_poi_fused_relu_threshold_backward_0[grid(1920)](buf1,
            primals_2, buf3, 1920, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # Second layer as a single addmm (bias + matmul).
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 30),
            (30, 1), 0), reinterpret_tensor(primals_4, (30, 4), (1, 30), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 30), (30, 1), 0), primals_4, buf3
class NetworkNew(nn.Module):
    """Network variant whose forward runs the Inductor-compiled `call` graph."""

    def __init__(self, input_size, number_of_actions):
        super(NetworkNew, self).__init__()
        self.input_size = input_size
        self.number_of_actions = number_of_actions
        self.full_connection1 = nn.Linear(input_size, 30)
        self.full_connection2 = nn.Linear(30, number_of_actions)

    def forward(self, input_0):
        # Gather parameters/input in the order `call` expects.
        primals_1 = self.full_connection1.weight
        primals_2 = self.full_connection1.bias
        primals_4 = self.full_connection2.weight
        primals_5 = self.full_connection2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        # call() returns (q_values, *saved_for_backward).
        return output[0]
|
Radu-Raicea/self-driving-car-ai
|
Network
| false
| 8,695
|
[
"MIT"
] | 16
|
cf2b42472f7e78dd3bd530c0c7cd547988a8b0d2
|
https://github.com/Radu-Raicea/self-driving-car-ai/tree/cf2b42472f7e78dd3bd530c0c7cd547988a8b0d2
|
GatedPooling1
|
import torch
import torch.nn as nn
class GatedPooling1(nn.Module):
    """
    Gated pooling as defined in https://arxiv.org/abs/1509.08985
    This implementation is the L variant ( entire layer, one parameter )
    """

    def __init__(self, kernel_size):
        super(GatedPooling1, self).__init__()
        self.avgpool = nn.AvgPool2d(kernel_size)
        self.maxpool = nn.MaxPool2d(kernel_size)
        # One shared 1->1 conv produces the gate for every channel.
        self.transform = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
            kernel_size)
        torch.nn.init.kaiming_normal_(self.transform.weight)

    def forward(self, x):
        # Run the shared gating conv over each channel independently.
        gates = []
        for channel in torch.unbind(x, dim=1):
            gates.append(self.transform(channel.unsqueeze(1)).squeeze(1))
        alpha = torch.sigmoid(torch.stack(gates, 1))
        # Blend max- and average-pooled features with the learned gate.
        return alpha * self.maxpool(x) + (1 - alpha) * self.avgpool(x)
def get_inputs():
    """Sample input tensor for exercising GatedPooling1."""
    batch = torch.rand([4, 4, 4, 4])
    return [batch]
def get_init_inputs():
    """Constructor (args, kwargs) for GatedPooling1."""
    return [[], {'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0(
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0,
    out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
    """Fused gated-pooling epilogue for a 4x4 window.

    Stacks the four per-channel gating-conv outputs (in_ptr0..in_ptr4,
    plus the shared bias in in_ptr1), computes max pooling and average
    pooling over the 16 elements of each window of the input (in_ptr5),
    and writes gate logits, max, avg and the sigmoid-blended result.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp6 = tl.load(in_ptr1 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp37 = tl.load(in_ptr5 + 16 * x2, xmask, eviction_policy='evict_last')
    tmp38 = tl.load(in_ptr5 + (1 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp40 = tl.load(in_ptr5 + (2 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp42 = tl.load(in_ptr5 + (3 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp44 = tl.load(in_ptr5 + (4 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp46 = tl.load(in_ptr5 + (5 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp48 = tl.load(in_ptr5 + (6 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp50 = tl.load(in_ptr5 + (7 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp52 = tl.load(in_ptr5 + (8 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp54 = tl.load(in_ptr5 + (9 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp56 = tl.load(in_ptr5 + (10 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp58 = tl.load(in_ptr5 + (11 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp60 = tl.load(in_ptr5 + (12 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp62 = tl.load(in_ptr5 + (13 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp64 = tl.load(in_ptr5 + (14 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp66 = tl.load(in_ptr5 + (15 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    # Re-assemble torch.stack of the four conv outputs: select by channel
    # index x0 and add the shared conv bias (tmp7).
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp8 = tmp5 + tmp7
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp4, tmp8, tmp9)
    tmp11 = tmp0 >= tmp3
    tmp12 = tl.full([1], 2, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp16 = tmp15 + tmp7
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp14, tmp16, tmp17)
    tmp19 = tmp0 >= tmp12
    tmp20 = tl.full([1], 3, tl.int64)
    tmp21 = tmp0 < tmp20
    tmp22 = tmp19 & tmp21
    tmp23 = tl.load(in_ptr3 + x1, tmp22 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp24 = tmp23 + tmp7
    tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
    tmp26 = tl.where(tmp22, tmp24, tmp25)
    tmp27 = tmp0 >= tmp20
    tl.full([1], 4, tl.int64)
    tmp30 = tl.load(in_ptr4 + x1, tmp27 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp31 = tmp30 + tmp7
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp27, tmp31, tmp32)
    tmp34 = tl.where(tmp22, tmp26, tmp33)
    tmp35 = tl.where(tmp14, tmp18, tmp34)
    tmp36 = tl.where(tmp4, tmp10, tmp35)
    # Max pooling: running maximum over the 16 window elements.
    tmp39 = triton_helpers.maximum(tmp38, tmp37)
    tmp41 = triton_helpers.maximum(tmp40, tmp39)
    tmp43 = triton_helpers.maximum(tmp42, tmp41)
    tmp45 = triton_helpers.maximum(tmp44, tmp43)
    tmp47 = triton_helpers.maximum(tmp46, tmp45)
    tmp49 = triton_helpers.maximum(tmp48, tmp47)
    tmp51 = triton_helpers.maximum(tmp50, tmp49)
    tmp53 = triton_helpers.maximum(tmp52, tmp51)
    tmp55 = triton_helpers.maximum(tmp54, tmp53)
    tmp57 = triton_helpers.maximum(tmp56, tmp55)
    tmp59 = triton_helpers.maximum(tmp58, tmp57)
    tmp61 = triton_helpers.maximum(tmp60, tmp59)
    tmp63 = triton_helpers.maximum(tmp62, tmp61)
    tmp65 = triton_helpers.maximum(tmp64, tmp63)
    tmp67 = triton_helpers.maximum(tmp66, tmp65)
    # Average pooling: sum over the window, then multiply by 1/16.
    tmp68 = tmp38 + tmp37
    tmp69 = tmp40 + tmp68
    tmp70 = tmp42 + tmp69
    tmp71 = tmp44 + tmp70
    tmp72 = tmp46 + tmp71
    tmp73 = tmp48 + tmp72
    tmp74 = tmp50 + tmp73
    tmp75 = tmp52 + tmp74
    tmp76 = tmp54 + tmp75
    tmp77 = tmp56 + tmp76
    tmp78 = tmp58 + tmp77
    tmp79 = tmp60 + tmp78
    tmp80 = tmp62 + tmp79
    tmp81 = tmp64 + tmp80
    tmp82 = tmp66 + tmp81
    tmp83 = 0.0625
    tmp84 = tmp82 * tmp83
    # Gated blend: alpha = sigmoid(gate); alpha*max + (1 - alpha)*avg.
    tmp85 = tl.sigmoid(tmp36)
    tmp86 = tmp85 * tmp67
    tmp87 = 1.0
    tmp88 = tmp87 - tmp85
    tmp89 = tmp88 * tmp84
    tmp90 = tmp86 + tmp89
    tl.store(out_ptr0 + x2, tmp36, xmask)
    tl.store(out_ptr1 + x2, tmp67, xmask)
    tl.store(out_ptr2 + x2, tmp84, xmask)
    tl.store(out_ptr3 + x2, tmp90, xmask)
def call(args):
    """Inductor-generated GatedPooling1 forward.

    args: [x, transform.weight, transform.bias]. Runs the shared 1->1
    gating conv on each of the four channels, then a single fused kernel
    computes gate logits, max/avg pooling and the blended output.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # One gating convolution per channel (channels unbound via offsets).
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            1, 4, 4), (64, 0, 4, 1), 0), primals_2, stride=(4, 4), padding=
            (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0
            ), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 1, 1), (1, 1, 1, 1))
        buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            1, 4, 4), (64, 0, 4, 1), 16), primals_2, stride=(4, 4), padding
            =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
            0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 1, 1), (1, 1, 1, 1))
        buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            1, 4, 4), (64, 0, 4, 1), 32), primals_2, stride=(4, 4), padding
            =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
            0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
        buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            1, 4, 4), (64, 0, 4, 1), 48), primals_2, stride=(4, 4), padding
            =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
            0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1))
        # Fused stack + sigmoid gate + max/avg pooling + blend.
        buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0[
            grid(16)](buf0, primals_3, buf1, buf2, buf3, primals_1, buf4,
            buf5, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf0
        del buf1
        del buf2
        del buf3
        del primals_3
    return buf7, primals_2, reinterpret_tensor(primals_1, (4, 1, 4, 4), (64,
        16, 4, 1), 0), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16,
        4, 1), 16), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4,
        1), 32), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 48
        ), buf4, buf5, buf6
class GatedPooling1New(nn.Module):
    """
    Gated pooling as defined in https://arxiv.org/abs/1509.08985
    This implementation is the L variant ( entire layer, one parameter )

    Forward dispatches to the Inductor-compiled `call` graph.
    """

    def __init__(self, kernel_size):
        super(GatedPooling1New, self).__init__()
        self.avgpool = nn.AvgPool2d(kernel_size)
        self.maxpool = nn.MaxPool2d(kernel_size)
        # Shared 1->1 conv that produces the per-window gate.
        self.transform = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
            kernel_size)
        torch.nn.init.kaiming_normal_(self.transform.weight)

    def forward(self, input_0):
        primals_2 = self.transform.weight
        primals_3 = self.transform.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        # call() returns (blended, *saved_for_backward).
        return output[0]
|
RicherMans/Dcase2018_pooling
|
GatedPooling1
| false
| 8,696
|
[
"Apache-2.0"
] | 13
|
10540502bba7215a1ba157614b39fedecb079d9b
|
https://github.com/RicherMans/Dcase2018_pooling/tree/10540502bba7215a1ba157614b39fedecb079d9b
|
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def weight_init(m):
    """Custom weight init for Conv2D and Linear layers."""
    if isinstance(m, nn.Linear):
        nn.init.orthogonal_(m.weight.data)
        m.bias.data.fill_(0.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # Only square kernels are supported.
        assert m.weight.size(2) == m.weight.size(3)
        m.weight.data.fill_(0.0)
        m.bias.data.fill_(0.0)
        # Delta-orthogonal style: orthogonal init at the kernel centre only.
        mid = m.weight.size(2) // 2
        gain = nn.init.calculate_gain('relu')
        nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain)
class Actor(nn.Module):
    """Deterministic policy network: state -> tanh-bounded, scaled action."""

    def __init__(self, state_dim, action_dim, max_action, hidden_dim=256):
        super(Actor, self).__init__()
        self.l1 = nn.Linear(state_dim, hidden_dim)
        self.l2 = nn.Linear(hidden_dim, hidden_dim)
        self.l3 = nn.Linear(hidden_dim, action_dim)
        self.max_action = max_action
        # Orthogonal/zero initialisation for all submodules.
        self.apply(weight_init)

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        squashed = torch.tanh(self.l3(hidden))
        return self.max_action * squashed
def get_inputs():
    """Sample state tensor for exercising Actor."""
    state = torch.rand([4, 4, 4, 4])
    return [state]
def get_init_inputs():
    """Constructor (args, kwargs) for Actor."""
    return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU epilogue for a Linear layer. In-place:
    # in_out_ptr0 holds the matmul result and is overwritten with
    # relu(x + bias); out_ptr0 receives the boolean (activation <= 0)
    # mask saved for the ReLU backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # total size is a multiple of XBLOCK; no bounds mask
    x2 = xindex
    x0 = xindex % 256  # bias index: last dim has 256 features
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # relu
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # mask of zeroed activations (for backward)
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Fused out = 4.0 * tanh(in): squashes the final linear layer's output
    # and scales by max_action (hard-coded to 4.0 at trace time).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp2 = 4.0  # max_action baked in by the compiler
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
    # Compiled Actor forward: the (4, 4, 4, 4) input is viewed as (64, 4)
    # so the three Linear layers run as cuBLAS mm/addmm; bias+ReLU and
    # tanh*max_action are fused Triton kernels. Returns the action tensor
    # first, then reshaped activations and the boolean ReLU masks
    # (buf6, buf7) saved for the backward pass.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (256, 256), (256, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (4, 256), (256, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        # l1: x @ W1^T as a plain mm; the bias is added in the fused kernel.
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
            )
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        get_raw_stream(0)
        # Fused bias + ReLU, in place on buf1; buf7 = backward mask.
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
            primals_2, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
            reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
            )
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        # Same fused bias + ReLU for the second layer.
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
            primals_5, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # l3: bias fused via addmm (beta=1).
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
            (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
            0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # out = 4.0 * tanh(l3 output).
        triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
        ), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
        ), buf4, primals_6, buf6, primals_4, buf7
def weight_init(m):
    """Custom weight init for Conv2D and Linear layers."""
    if isinstance(m, nn.Linear):
        # Fully-connected layers: orthogonal weight matrix, zero bias.
        nn.init.orthogonal_(m.weight.data)
        m.bias.data.fill_(0.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # Delta-orthogonal style init: zero everything, then place an
        # orthogonal matrix at the spatial center of the (square) kernel.
        assert m.weight.size(2) == m.weight.size(3)
        m.weight.data.fill_(0.0)
        m.bias.data.fill_(0.0)
        center = m.weight.size(2) // 2
        relu_gain = nn.init.calculate_gain('relu')
        nn.init.orthogonal_(m.weight.data[:, :, center, center], relu_gain)
class ActorNew(nn.Module):
    """Actor policy whose forward pass runs the Triton-compiled `call`
    graph instead of eager PyTorch ops."""

    def __init__(self, state_dim, action_dim, max_action, hidden_dim=256):
        super(ActorNew, self).__init__()
        self.l1 = nn.Linear(state_dim, hidden_dim)
        self.l2 = nn.Linear(hidden_dim, hidden_dim)
        self.l3 = nn.Linear(hidden_dim, action_dim)
        self.max_action = max_action
        self.apply(weight_init)

    def forward(self, input_0):
        # `call` expects (l1.w, l1.b, input, l2.w, l2.b, l3.w, l3.b);
        # element 0 of its result is the action tensor.
        graph_args = [self.l1.weight, self.l1.bias, input_0,
            self.l2.weight, self.l2.bias, self.l3.weight, self.l3.bias]
        return call(graph_args)[0]
|
LQNew/LWDRL
|
Actor
| false
| 8,697
|
[
"MIT"
] | 11
|
0e4fab077a0cfbd27590b840557f4fda033c74ff
|
https://github.com/LQNew/LWDRL/tree/0e4fab077a0cfbd27590b840557f4fda033c74ff
|
Conv2d
|
import torch
import torch.nn as nn
class Conv2d(nn.Module):
    """Conv block: convolution with 'same'-style padding (for stride 1 and
    odd kernels), optional BatchNorm and Dropout, then a configurable
    activation ('leakyrelu', 'relu' or 'tanh')."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            bn=False, activation='leakyrelu', dropout=False):
        super(Conv2d, self).__init__()
        # Pad so that stride=1 preserves the spatial size for odd kernels.
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding=padding)
        self.bn = None
        if bn:
            self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
                affine=True)
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        if activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(negative_slope=0.2)
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))

    def forward(self, x):
        y = self.conv(x)
        if self.bn is not None:
            y = self.bn(y)
        if self.dropout is not None:
            y = self.dropout(y)
        return self.activation(y)
def get_inputs():
    """Return sample forward-pass inputs: one random (4, 4, 4, 4) tensor."""
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    """Return (args, kwargs) used to instantiate the module under test."""
    kwargs = {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused conv epilogue: adds the per-channel bias, applies
    # LeakyReLU(negative_slope=0.2), and writes the boolean (x > 0) gate
    # used by the backward pass.
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 9 % 4  # channel index (9 spatial positions per channel)
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # conv output + bias
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3  # positive gate
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)  # leaky relu
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
    # Compiled Conv2d forward (default leakyrelu path): external
    # convolution kernel followed by a fused bias-add + LeakyReLU Triton
    # kernel. buf1 keeps the boolean gate for the backward pass.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Convolution without bias; bias is added in the fused kernel.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
        buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(144)](buf0,
            primals_2, buf1, buf2, 144, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
    return buf2, primals_1, primals_3, buf1
class Conv2dNew(nn.Module):
    """Triton-backed variant of the Conv2d block: forward delegates to the
    compiled `call` graph (convolution + fused bias/LeakyReLU)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            bn=False, activation='leakyrelu', dropout=False):
        super(Conv2dNew, self).__init__()
        # Pad so that stride=1 preserves the spatial size for odd kernels.
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding=padding)
        self.bn = None
        if bn:
            self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
                affine=True)
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        if activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(negative_slope=0.2)
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))

    def forward(self, input_0):
        # `call` expects (conv.weight, conv.bias, input); element 0 of its
        # result is the activated output.
        weight = self.conv.weight
        bias = self.conv.bias
        return call([weight, bias, input_0])[0]
|
RQuispeC/pytorch-ACSCP
|
Conv2d
| false
| 8,698
|
[
"MIT"
] | 25
|
c83f08632012c2245250ff9c5140814461db575c
|
https://github.com/RQuispeC/pytorch-ACSCP/tree/c83f08632012c2245250ff9c5140814461db575c
|
GatedPooling
|
import torch
import torch.nn as nn
class GatedPooling(nn.Module):
    """
    Gated pooling as defined in https://arxiv.org/abs/1509.08985
    This implementation is the LR variant
    """

    def __init__(self, kernel_size, filter):
        super(GatedPooling, self).__init__()
        self.avgpool = nn.AvgPool2d(kernel_size)
        self.maxpool = nn.MaxPool2d(kernel_size)
        # Learned per-region gate: a conv with the same kernel/stride as
        # the pooling windows, so one gate value per pooled output.
        self.transform = nn.Conv2d(filter, filter, kernel_size=kernel_size,
            stride=kernel_size)

    def forward(self, x):
        gate = torch.sigmoid(self.transform(x))
        pooled_max = self.maxpool(x)
        pooled_avg = self.avgpool(x)
        # Convex combination of max- and average-pooling per region.
        return gate * pooled_max + (1 - gate) * pooled_avg
def get_inputs():
    """Return sample forward-pass inputs: one random (4, 4, 4, 4) tensor."""
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    """Return (args, kwargs) used to instantiate the module under test."""
    kwargs = {'kernel_size': 4, 'filter': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0(
        in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel,
        XBLOCK: tl.constexpr):
    # Fused gated-pooling epilogue. One program element per (batch, channel)
    # pair; each 4x4 window is fully unrolled (16 loads). Computes:
    #   gate = sigmoid(conv_out + bias)       (in_out_ptr0 += in_ptr0 bias)
    #   max  = max over the 16 window values  (in_ptr1)
    #   avg  = sum / 16 over the same values
    #   out  = gate * max + (1 - gate) * avg  (out_ptr2)
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # channel index for the conv bias
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    # Unrolled loads of the 16 elements of this (n, c) 4x4 window.
    tmp3 = tl.load(in_ptr1 + 16 * x2, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp6 = tl.load(in_ptr1 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp8 = tl.load(in_ptr1 + (3 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp10 = tl.load(in_ptr1 + (4 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr1 + (5 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp14 = tl.load(in_ptr1 + (6 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp16 = tl.load(in_ptr1 + (7 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp18 = tl.load(in_ptr1 + (8 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp20 = tl.load(in_ptr1 + (9 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr1 + (10 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp24 = tl.load(in_ptr1 + (11 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr1 + (12 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp28 = tl.load(in_ptr1 + (13 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp30 = tl.load(in_ptr1 + (14 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp32 = tl.load(in_ptr1 + (15 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 + tmp1  # conv output + bias (gate logits)
    # Running max over the window (max-pool).
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp7 = triton_helpers.maximum(tmp6, tmp5)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp11 = triton_helpers.maximum(tmp10, tmp9)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tmp17 = triton_helpers.maximum(tmp16, tmp15)
    tmp19 = triton_helpers.maximum(tmp18, tmp17)
    tmp21 = triton_helpers.maximum(tmp20, tmp19)
    tmp23 = triton_helpers.maximum(tmp22, tmp21)
    tmp25 = triton_helpers.maximum(tmp24, tmp23)
    tmp27 = triton_helpers.maximum(tmp26, tmp25)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tmp31 = triton_helpers.maximum(tmp30, tmp29)
    tmp33 = triton_helpers.maximum(tmp32, tmp31)
    # Running sum over the window (avg-pool numerator).
    tmp34 = tmp4 + tmp3
    tmp35 = tmp6 + tmp34
    tmp36 = tmp8 + tmp35
    tmp37 = tmp10 + tmp36
    tmp38 = tmp12 + tmp37
    tmp39 = tmp14 + tmp38
    tmp40 = tmp16 + tmp39
    tmp41 = tmp18 + tmp40
    tmp42 = tmp20 + tmp41
    tmp43 = tmp22 + tmp42
    tmp44 = tmp24 + tmp43
    tmp45 = tmp26 + tmp44
    tmp46 = tmp28 + tmp45
    tmp47 = tmp30 + tmp46
    tmp48 = tmp32 + tmp47
    tmp49 = 0.0625  # 1/16: window size
    tmp50 = tmp48 * tmp49  # average
    tmp51 = tl.sigmoid(tmp2)  # gate in (0, 1)
    tmp52 = tmp51 * tmp33
    tmp53 = 1.0
    tmp54 = tmp53 - tmp51
    tmp55 = tmp54 * tmp50
    tmp56 = tmp52 + tmp55  # gate*max + (1-gate)*avg
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
    tl.store(out_ptr0 + x2, tmp33, xmask)
    tl.store(out_ptr1 + x2, tmp50, xmask)
    tl.store(out_ptr2 + x2, tmp56, xmask)
def call(args):
    # Compiled GatedPooling forward: gate convolution via the external
    # kernel, then one fused kernel that adds the conv bias, applies
    # sigmoid, computes 4x4 max- and avg-pooling, and blends them.
    # Returns the blended output first, then saved tensors for backward.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Gate conv without bias; the bias is added in the fused kernel.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
            4), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0[
            grid(16)](buf1, primals_2, primals_3, buf2, buf3, buf4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return buf4, primals_1, primals_3, buf1, buf2, buf3
class GatedPoolingNew(nn.Module):
    """
    Gated pooling as defined in https://arxiv.org/abs/1509.08985
    This implementation is the LR variant
    """

    def __init__(self, kernel_size, filter):
        super(GatedPoolingNew, self).__init__()
        self.avgpool = nn.AvgPool2d(kernel_size)
        self.maxpool = nn.MaxPool2d(kernel_size)
        self.transform = nn.Conv2d(filter, filter, kernel_size=kernel_size,
            stride=kernel_size)

    def forward(self, input_0):
        # Delegate to the Triton-compiled graph; element 0 of the result is
        # the blended gated-pooling output.
        gate_weight = self.transform.weight
        gate_bias = self.transform.bias
        return call([gate_weight, gate_bias, input_0])[0]
|
RicherMans/Dcase2018_pooling
|
GatedPooling
| false
| 8,699
|
[
"Apache-2.0"
] | 13
|
10540502bba7215a1ba157614b39fedecb079d9b
|
https://github.com/RicherMans/Dcase2018_pooling/tree/10540502bba7215a1ba157614b39fedecb079d9b
|
StaticArchGenerator
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.init as weight_init
from torch.nn import Parameter
class ArchSampler(nn.Module):
    """Base class for stochastic architecture samplers.

    Maintains one Bernoulli probability per stochastic node
    (``distrib_dim`` of them) and records, per sampling sequence, the
    log-probabilities and entropies needed for REINFORCE-style updates.
    Subclasses implement ``forward`` to produce the probability vector.
    """

    def __init__(self, distrib_dim, all_same, deter_eval, var_names=None, *
            args, **kwargs):
        super().__init__()
        self.distrib_dim = distrib_dim
        # If True, a single sampling is shared by the whole batch.
        self.all_same = all_same
        # If True, evaluation thresholds probabilities at 0.5 instead of sampling.
        self.deter_eval = deter_eval
        self.frozen = False
        # Per-sequence records; (re)initialised by start_new_sequence().
        self.log_probas = None
        self.distrib_entropies = None
        self._seq_probas = None
        if var_names is not None:
            # NOTE(review): self.var_names is only set when var_names is
            # given; nodes_to_prune/remove_var assume it exists.
            assert len(var_names) == self.distrib_dim
            self.var_names = var_names

    def freeze(self):
        """Forbid further sampling in training mode (checked by subclasses)."""
        self.frozen = True

    def start_new_sequence(self):
        """Reset the per-sequence log-prob/entropy/proba accumulators."""
        self.log_probas = []
        self.distrib_entropies = []
        self._seq_probas = []

    def nodes_to_prune(self, treshold):
        """Return the names of nodes whose current probability is below `treshold`."""
        nodes = []
        for node, weight in zip(self.var_names, self().squeeze().unbind()):
            if weight < treshold:
                nodes.append(node)
        return nodes

    def sample_archs(self, batch_size, probas):
        """
        Hook called by pytorch before each forward
        :param: Current module
        :param input: Input given to the module's forward
        :return:
        """
        self._check_probas(probas, self.all_same)
        if probas.size(0) != batch_size:
            if probas.size(0) != 1:
                raise ValueError(
                    "Sampling probabilities dimensions {} doesn't match with batch size {}."
                    .format(probas.size(), batch_size))
            if not self.all_same:
                # Broadcast the single probability row over the batch.
                probas = probas.expand(batch_size, -1)
        distrib = torch.distributions.Bernoulli(probas)
        if not self.training and self.deter_eval:
            # Deterministic evaluation: threshold instead of sampling.
            samplings = (probas > 0.5).float()
        else:
            samplings = distrib.sample()
        if self.all_same:
            # One shared sampling, repeated for every batch element.
            samplings = samplings.expand(batch_size, -1)
        # Record for the REINFORCE update of the current sequence.
        self._seq_probas.append(probas)
        self.distrib_entropies.append(distrib.entropy())
        self.log_probas.append(distrib.log_prob(samplings))
        return samplings

    def _check_probas(self, probas, all_same):
        """
        :param probas: B_size*N_nodes Tensor containing the probability of each
        arch being sampled in the nex forward.
        :param all_same: if True, the same sampling will be used for the whole
        batch in the next forward.
        :return:
        """
        # NOTE: `or` binds looser than `and`, so this reads as
        # (dim != 2) or (all_same and size(0) != 1) — intentional precedence.
        if probas.dim() != 2 or all_same and probas.size(0) != 1:
            raise ValueError(
                'probas params has wrong dimension: {} (all_same={})'.
                format(probas.size(), all_same))
        if probas.size(-1) != self.distrib_dim:
            raise ValueError(
                'Should have exactly as many probas as the number of stochastic nodes({}), got {} instead.'
                .format(self.distrib_dim, probas.size(-1)))

    @property
    def last_arch_probas(self):
        # NOTE(review): `self.probas` is never assigned in this class, so
        # this property raises AttributeError unless a subclass sets it.
        return self.probas

    @property
    def last_sequence_probas(self):
        """
        :return: The probabilities of each arch for the last sequence in
        format (seq_len*batch_size*n_sampling_params)
        """
        return torch.stack(self._seq_probas)
class StaticArchGenerator(ArchSampler):
    """Arch sampler with one learnable logit per stochastic node; the
    sampling probabilities are sigmoid(params), independent of any input."""

    def __init__(self, initial_p, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.params = Parameter(torch.Tensor(1, self.distrib_dim))
        # Initialise every logit so that sigmoid(logit) == initial_p.
        if initial_p < 1:
            logit = np.log(initial_p / (1 - initial_p))
        else:
            logit = float('inf')
        weight_init.constant_(self.params, logit)

    def forward(self, z=None):
        if self.frozen and self.training:
            raise RuntimeError(
                'Trying to sample from a frozen distrib gen in training mode')
        return self.params.sigmoid()

    def entropy(self):
        """Entropy of the independent Bernoulli distribution over nodes."""
        return torch.distributions.Bernoulli(self.params.sigmoid()).entropy()

    def remove_var(self, name):
        """Drop the logit associated with the pruned variable `name`."""
        assert self.var_names
        self.distrib_dim -= 1
        drop_idx = self.var_names.index(name)
        self.var_names.remove(name)
        keep = torch.ones_like(self.params).bool()
        keep[0, drop_idx] = 0
        self.params = nn.Parameter(self.params[keep].unsqueeze(0))

    def is_deterministic(self):
        """True iff every probability is exactly 0 or 1 (p == p**2)."""
        probs = self()
        return torch.equal(probs, probs ** 2)
def get_inputs():
    """No tensor inputs: the module's forward takes no arguments."""
    inputs = []
    return inputs
def get_init_inputs():
    """Return (args, kwargs) used to instantiate the module under test."""
    kwargs = {'initial_p': 4, 'distrib_dim': 4, 'all_same': 4,
        'deter_eval': 4}
    return [[], kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
import torch.nn.init as weight_init
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Elementwise sigmoid over the 4 architecture logits.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
    # Compiled StaticArchGenerator forward: sigmoid(params) on the GPU.
    # Returns the result twice: once as output, once saved for backward.
    primals_1, = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(4)](primals_1, buf0, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_1
    return buf0, buf0
class ArchSampler(nn.Module):
    """Base class for stochastic architecture samplers.

    Maintains one Bernoulli probability per stochastic node
    (``distrib_dim`` of them) and records, per sampling sequence, the
    log-probabilities and entropies needed for REINFORCE-style updates.
    Subclasses implement ``forward`` to produce the probability vector.
    """

    def __init__(self, distrib_dim, all_same, deter_eval, var_names=None, *
            args, **kwargs):
        super().__init__()
        self.distrib_dim = distrib_dim
        # If True, a single sampling is shared by the whole batch.
        self.all_same = all_same
        # If True, evaluation thresholds probabilities at 0.5 instead of sampling.
        self.deter_eval = deter_eval
        self.frozen = False
        # Per-sequence records; (re)initialised by start_new_sequence().
        self.log_probas = None
        self.distrib_entropies = None
        self._seq_probas = None
        if var_names is not None:
            # NOTE(review): self.var_names is only set when var_names is
            # given; nodes_to_prune/remove_var assume it exists.
            assert len(var_names) == self.distrib_dim
            self.var_names = var_names

    def freeze(self):
        """Forbid further sampling in training mode (checked by subclasses)."""
        self.frozen = True

    def start_new_sequence(self):
        """Reset the per-sequence log-prob/entropy/proba accumulators."""
        self.log_probas = []
        self.distrib_entropies = []
        self._seq_probas = []

    def nodes_to_prune(self, treshold):
        """Return the names of nodes whose current probability is below `treshold`."""
        nodes = []
        for node, weight in zip(self.var_names, self().squeeze().unbind()):
            if weight < treshold:
                nodes.append(node)
        return nodes

    def sample_archs(self, batch_size, probas):
        """
        Hook called by pytorch before each forward
        :param: Current module
        :param input: Input given to the module's forward
        :return:
        """
        self._check_probas(probas, self.all_same)
        if probas.size(0) != batch_size:
            if probas.size(0) != 1:
                raise ValueError(
                    "Sampling probabilities dimensions {} doesn't match with batch size {}."
                    .format(probas.size(), batch_size))
            if not self.all_same:
                # Broadcast the single probability row over the batch.
                probas = probas.expand(batch_size, -1)
        distrib = torch.distributions.Bernoulli(probas)
        if not self.training and self.deter_eval:
            # Deterministic evaluation: threshold instead of sampling.
            samplings = (probas > 0.5).float()
        else:
            samplings = distrib.sample()
        if self.all_same:
            # One shared sampling, repeated for every batch element.
            samplings = samplings.expand(batch_size, -1)
        # Record for the REINFORCE update of the current sequence.
        self._seq_probas.append(probas)
        self.distrib_entropies.append(distrib.entropy())
        self.log_probas.append(distrib.log_prob(samplings))
        return samplings

    def _check_probas(self, probas, all_same):
        """
        :param probas: B_size*N_nodes Tensor containing the probability of each
        arch being sampled in the nex forward.
        :param all_same: if True, the same sampling will be used for the whole
        batch in the next forward.
        :return:
        """
        # NOTE: `or` binds looser than `and`, so this reads as
        # (dim != 2) or (all_same and size(0) != 1) — intentional precedence.
        if probas.dim() != 2 or all_same and probas.size(0) != 1:
            raise ValueError(
                'probas params has wrong dimension: {} (all_same={})'.
                format(probas.size(), all_same))
        if probas.size(-1) != self.distrib_dim:
            raise ValueError(
                'Should have exactly as many probas as the number of stochastic nodes({}), got {} instead.'
                .format(self.distrib_dim, probas.size(-1)))

    @property
    def last_arch_probas(self):
        # NOTE(review): `self.probas` is never assigned in this class, so
        # this property raises AttributeError unless a subclass sets it.
        return self.probas

    @property
    def last_sequence_probas(self):
        """
        :return: The probabilities of each arch for the last sequence in
        format (seq_len*batch_size*n_sampling_params)
        """
        return torch.stack(self._seq_probas)
class StaticArchGeneratorNew(ArchSampler):
    """Triton-backed StaticArchGenerator: forward runs sigmoid(params)
    through the compiled `call` graph instead of eager PyTorch."""

    def __init__(self, initial_p, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.params = Parameter(torch.Tensor(1, self.distrib_dim))
        # Initialise every logit so that sigmoid(logit) == initial_p.
        if initial_p < 1:
            logit = np.log(initial_p / (1 - initial_p))
        else:
            logit = float('inf')
        weight_init.constant_(self.params, logit)

    def entropy(self):
        """Entropy of the independent Bernoulli distribution over nodes."""
        return torch.distributions.Bernoulli(self.params.sigmoid()).entropy()

    def remove_var(self, name):
        """Drop the logit associated with the pruned variable `name`."""
        assert self.var_names
        self.distrib_dim -= 1
        drop_idx = self.var_names.index(name)
        self.var_names.remove(name)
        keep = torch.ones_like(self.params).bool()
        keep[0, drop_idx] = 0
        self.params = nn.Parameter(self.params[keep].unsqueeze(0))

    def is_deterministic(self):
        """True iff every probability is exactly 0 or 1 (p == p**2)."""
        probs = self()
        return torch.equal(probs, probs ** 2)

    def forward(self):
        # Element 0 of the compiled graph's result is sigmoid(params).
        return call([self.params])[0]
|
RaoefTaki/MNTDP-forked
|
StaticArchGenerator
| false
| 8,700
|
[
"MIT"
] | 15
|
d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
|
https://github.com/RaoefTaki/MNTDP-forked/tree/d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
|
PMA
|
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class MAB(nn.Module):
    """Multihead Attention Block (Set Transformer style): X attends over Y.

    O = ln1(Q + dropout1(attn(Q, K, V)))
    O = ln2(O + dropout2(relu(fc_o(O))))
    where LayerNorm / Dropout default to identity when disabled.
    """

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity()
        self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity()

    def forward(self, X, Y, mask=None):
        Q = self.fc_q(X)
        K = self.fc_k(Y)
        V = self.fc_v(Y)
        # Split heads along the feature dim, stack them along the batch dim.
        Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0)
        K_ = torch.cat(K.chunk(self.num_heads, -1), 0)
        V_ = torch.cat(V.chunk(self.num_heads, -1), 0)
        A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1])
        if mask is None:
            A = torch.softmax(A_logits, -1)
        else:
            # Broadcast the mask over queries and heads, mask out attention
            # to padded positions, and zero rows that were fully masked
            # (softmax over all -inf yields NaN).
            mask = torch.stack([mask] * Q.shape[-2], -2)
            mask = torch.cat([mask] * self.num_heads, 0)
            A_logits.masked_fill_(mask, -float('inf'))
            A = torch.softmax(A_logits, -1)
            A.masked_fill_(torch.isnan(A), 0.0)
        # Merge heads back along the feature dim.
        attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1)
        O = self.ln1(Q + self.dropout1(attn))
        O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O))))
        return O
class PMA(nn.Module):
    """Pooling by Multihead Attention: a learned set of `num_inds` seed
    vectors attends over the input set X, yielding a fixed-size summary."""

    def __init__(self, dim_X, dim, num_inds, **kwargs):
        super().__init__()
        self.I = nn.Parameter(torch.Tensor(num_inds, dim))
        nn.init.xavier_uniform_(self.I)
        self.mab = MAB(dim, dim_X, dim, **kwargs)

    def forward(self, X, mask=None):
        # Broadcast the seeds over the batch when X is batched (3-D).
        seeds = self.I if X.dim() == 2 else self.I.repeat(X.shape[0], 1, 1)
        return self.mab(seeds, X, mask=mask)
def get_inputs():
    """Return sample forward-pass inputs: one random (4, 4) tensor."""
    sample = torch.rand(4, 4)
    return [sample]
def get_init_inputs():
    """Return (args, kwargs) used to instantiate the module under test."""
    kwargs = {'dim_X': 4, 'dim': 4, 'num_inds': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Head split for a (4, 4) projection: torch.cat(t.chunk(4, -1), 0).
    # Each group of 4 output rows reads one column of the input, producing
    # a (16, 1) tensor of heads stacked along dim 0.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # rows 0-3 -> column 0
    tmp5 = tl.load(in_ptr0 + 4 * x0, tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # rows 4-7 -> column 1
    tmp10 = tl.load(in_ptr0 + (1 + 4 * (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13  # rows 8-11 -> column 2
    tmp15 = tl.load(in_ptr0 + (2 + 4 * (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12  # rows 12-15 -> column 3
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (3 + 4 * (-12 + x0)), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    # Select the chunk this output row belongs to.
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x0, tmp22, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
        tl.constexpr):
    # Row-wise softmax over 16 logits. The attention scale
    # 1/sqrt(d) = 0.5 is applied after the max subtraction, which is
    # mathematically softmax(0.5 * logits).
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]  # row max (numerical stability)
    tmp7 = tmp2 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8  # apply the 1/sqrt(4) scale
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]
    tmp15 = tmp10 / tmp14  # normalise
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
@triton.jit
def triton_poi_fused_add_cat_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
        constexpr):
    # Inverse head split plus residual: regroups the (16, 1) per-head
    # attention output back to (4, 4) (torch.cat(chunks, -1)) and adds it
    # in place to the tensor in in_out_ptr0.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature (head) index
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = x0
    tl.full([1], 0, tl.int64)
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp1 < tmp4  # feature 0 <- head rows 0-3
    tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp1 >= tmp4
    tmp8 = tl.full([1], 2, tl.int64)
    tmp9 = tmp1 < tmp8
    tmp10 = tmp7 & tmp9  # feature 1 <- head rows 4-7
    tmp11 = tl.load(in_ptr0 + (4 + x1), tmp10 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp1 >= tmp8
    tmp13 = tl.full([1], 3, tl.int64)
    tmp14 = tmp1 < tmp13
    tmp15 = tmp12 & tmp14  # feature 2 <- head rows 8-11
    tmp16 = tl.load(in_ptr0 + (8 + x1), tmp15 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp17 = tmp1 >= tmp13  # feature 3 <- head rows 12-15
    tl.full([1], 4, tl.int64)
    tmp20 = tl.load(in_ptr0 + (12 + x1), tmp17 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp21 = tl.where(tmp15, tmp16, tmp20)
    tmp22 = tl.where(tmp10, tmp11, tmp21)
    tmp23 = tl.where(tmp5, tmp6, tmp22)
    tmp24 = tmp0 + tmp23  # residual add
    tl.store(in_out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1,
        in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Second residual branch of the MAB: out = O + relu(fc_o(O) + bias).
    # Also emits the boolean (activation <= 0) mask for the ReLU backward.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2  # fc_o output + bias
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)  # relu
    tmp6 = tmp0 + tmp5  # residual add
    tmp7 = 0.0
    tmp8 = tmp5 <= tmp7
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
    # Compiled PMA forward (2-D input, mask=None, ln/dropout disabled):
    # three addmm projections, head split via the cat kernel, scaled
    # softmax attention, head re-merge + residual, then the fc_o ReLU
    # residual branch. NOTE(review): which primal plays the role of
    # I / weights / input follows the positional order established by the
    # wrapper's forward; the primals_N names here are trace labels, not
    # necessarily the roles suggested by the wrapper's assignments —
    # verify against the original eager graph before editing.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4, 4), (4, 1)) if False else None
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # First projection (bias fused via addmm, beta=1).
        extern_kernels.addmm(primals_4, primals_2, reinterpret_tensor(
            primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_4
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, primals_1, reinterpret_tensor(
            primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, primals_1, reinterpret_tensor(
            primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_7
        del primals_8
        # Head splits: (4, 4) -> (16, 1) via torch.cat(chunk(4, -1), 0).
        buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(16)](buf0, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        triton_poi_fused_cat_0[grid(16)](buf2, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = reinterpret_tensor(buf2, (16, 1), (1, 1), 0)
        del buf2
        triton_poi_fused_cat_0[grid(16)](buf1, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # Attention logits Q_ @ K_^T, then scaled row softmax.
        buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(buf5, (1, 16), (0, 1), 0
            ), out=buf6)
        buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(16)](buf6, buf9, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del buf6
        buf10 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0)
        del buf1
        # A @ V_, then re-merge heads and add the residual in place.
        extern_kernels.mm(buf9, buf4, out=buf10)
        buf11 = buf0
        del buf0
        triton_poi_fused_add_cat_2[grid(16)](buf11, buf10, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
        del buf10
        extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1,
            4), 0), out=buf12)
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        # O + relu(fc_o(O) + bias); buf14 = ReLU backward mask.
        triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf11,
            buf12, primals_10, buf13, buf14, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf12
        del primals_10
    return (buf13, primals_1, primals_2, buf3, buf9, buf11, buf14,
        primals_9, reinterpret_tensor(buf4, (1, 16), (1, 1), 0), buf5,
        primals_3)
class MAB(nn.Module):
    """Multihead Attention Block (Set Transformer style): X attends over Y.

    O = ln1(Q + dropout1(attn(Q, K, V)))
    O = ln2(O + dropout2(relu(fc_o(O))))
    where LayerNorm / Dropout default to identity when disabled.
    """

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity()
        self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity()

    def forward(self, X, Y, mask=None):
        Q = self.fc_q(X)
        K = self.fc_k(Y)
        V = self.fc_v(Y)
        # Split heads along the feature dim, stack them along the batch dim.
        Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0)
        K_ = torch.cat(K.chunk(self.num_heads, -1), 0)
        V_ = torch.cat(V.chunk(self.num_heads, -1), 0)
        A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1])
        if mask is None:
            A = torch.softmax(A_logits, -1)
        else:
            # Broadcast the mask over queries and heads, mask out attention
            # to padded positions, and zero rows that were fully masked
            # (softmax over all -inf yields NaN).
            mask = torch.stack([mask] * Q.shape[-2], -2)
            mask = torch.cat([mask] * self.num_heads, 0)
            A_logits.masked_fill_(mask, -float('inf'))
            A = torch.softmax(A_logits, -1)
            A.masked_fill_(torch.isnan(A), 0.0)
        # Merge heads back along the feature dim.
        attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1)
        O = self.ln1(Q + self.dropout1(attn))
        O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O))))
        return O
class PMANew(nn.Module):
    """Pooling-by-Multihead-Attention head whose forward runs the Triton-compiled `call` graph."""

    def __init__(self, dim_X, dim, num_inds, **kwargs):
        super().__init__()
        # learnable inducing points that attend over the input set
        self.I = nn.Parameter(torch.Tensor(num_inds, dim))
        nn.init.xavier_uniform_(self.I)
        self.mab = MAB(dim, dim_X, dim, **kwargs)

    def forward(self, input_0):
        """Return the pooled output from the compiled graph."""
        mab = self.mab
        # positional layout required by `call` (primals_1 .. primals_10)
        args = [self.I,
                mab.fc_q.weight, mab.fc_k.weight, mab.fc_q.bias,
                mab.fc_v.weight, mab.fc_k.bias, mab.fc_o.weight,
                mab.fc_v.bias, input_0, mab.fc_o.bias]
        return call(args)[0]
|
OpenXAIProject/dac
|
PMA
| false
| 8,701
|
[
"MIT"
] | 17
|
652776e21b56dcb68839363bb077d5c5ea28d81e
|
https://github.com/OpenXAIProject/dac/tree/652776e21b56dcb68839363bb077d5c5ea28d81e
|
GlobalAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class GlobalAttention(nn.Module):
    """Parameterized convex combination of a memory matrix driven by a query.

    Maps a query `q` of size (batch, query_size) and a memory of size
    (batch, seq_len, attn_size) to an attention distribution and a context
    vector.  The score function is selected by `attn_type`:

    * 'dot'     (Luong):    score(H_j, q) = H_j^T q      (needs query_size == attn_size)
    * 'general' (Luong):    score(H_j, q) = H_j^T W_a q
    * 'mlp'     (Bahdanau): score(H_j, q) = w_a^T tanh(W_a q + U_a h_j)
    """

    def __init__(self, query_size, attn_size, attn_type='dot'):
        super(GlobalAttention, self).__init__()
        self.query_size = query_size
        self.attn_size = attn_size
        self.attn_type = attn_type
        # only the chosen score function owns parameters
        if self.attn_type == 'general':
            self.linear_in = nn.Linear(query_size, attn_size, bias=False)
        elif self.attn_type == 'mlp':
            self.linear_query = nn.Linear(query_size, attn_size, bias=True)
            self.attn_w = nn.Linear(attn_size, 1, bias=False)
        elif self.attn_type == 'dot':
            assert self.query_size == self.attn_size

    def forward(self, query, memory_keys, memory_values, memory_masks):
        """Compute attention weights and the attended context.

        Args:
            query: (batch, query_size)
            memory_keys: (batch, seq_len, attn_size)
            memory_values: values combined with the attention weights
            memory_masks: 0/1 mask over key positions, or None

        Returns:
            attn_score: (batch, seq_len) attention distribution
            attn_memory: context vector, sum_j attn_score_j * values_j
        """
        batch_size, seq_len, attn_size = memory_keys.size()
        if self.attn_type == 'dot':
            raw = torch.bmm(memory_keys, query.unsqueeze(2))
        elif self.attn_type == 'general':
            raw = torch.bmm(memory_keys, self.linear_in(query).unsqueeze(2))
        elif self.attn_type == 'mlp':
            expanded = self.linear_query(query.unsqueeze(1)).expand(batch_size,
                seq_len, attn_size)
            raw = self.attn_w(torch.tanh(expanded + memory_keys))
        attn_score = raw.squeeze(2)
        if memory_masks is not None:
            # zero padded positions, then push them to -inf before softmax
            attn_score = (attn_score * memory_masks).masked_fill(
                memory_masks == 0, -1e+18)
        attn_score = F.softmax(attn_score, dim=1)
        if memory_masks is not None:
            # make masked probabilities exactly zero
            attn_score = attn_score.masked_fill(memory_masks == 0, 0)
        attn_memory = torch.sum(attn_score.unsqueeze(2) * memory_values, 1)
        return attn_score, attn_memory
def get_inputs():
    """Example forward inputs: (query, memory_keys, memory_values, memory_masks)."""
    shapes = [(4, 4), (4, 4, 4), (4, 4), (4, 4)]
    return [torch.rand(*s) for s in shapes]
def get_init_inputs():
    """Constructor arguments for GlobalAttention: no positionals, two keywords."""
    return [[], dict(query_size=4, attn_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Row reduction for GlobalAttention's masked softmax ('dot' path, seq_len=4):
# applies score * mask, substitutes -1e18 where mask == 0, and stores per-row
# max (out_ptr0) and sum of exp(score - max) (out_ptr1) for the next kernel.
# in_ptr0 = mask (4x4), in_ptr1 = raw scores (4x4); one lane per row x0.
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_mul_0(in_ptr0, in_ptr1,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1  # mask == 0 -> padded position
    tmp4 = tmp3 * tmp0
    tmp5 = -9.999999843067494e+17  # float32 rounding of -1e18
    tmp6 = tl.where(tmp2, tmp5, tmp4)
    tmp8 = tmp7 == tmp1
    tmp10 = tmp9 * tmp7
    tmp11 = tl.where(tmp8, tmp5, tmp10)
    tmp12 = triton_helpers.maximum(tmp6, tmp11)
    tmp14 = tmp13 == tmp1
    tmp16 = tmp15 * tmp13
    tmp17 = tl.where(tmp14, tmp5, tmp16)
    tmp18 = triton_helpers.maximum(tmp12, tmp17)
    tmp20 = tmp19 == tmp1
    tmp22 = tmp21 * tmp19
    tmp23 = tl.where(tmp20, tmp5, tmp22)
    tmp24 = triton_helpers.maximum(tmp18, tmp23)  # row max (numerical stability)
    tmp25 = tmp6 - tmp24
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp11 - tmp24
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp26 + tmp28
    tmp30 = tmp17 - tmp24
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp29 + tmp31
    tmp33 = tmp23 - tmp24
    tmp34 = tl_math.exp(tmp33)
    tmp35 = tmp32 + tmp34  # softmax denominator
    tl.store(out_ptr0 + x0, tmp24, xmask)
    tl.store(out_ptr1 + x0, tmp35, xmask)
# Softmax normalization pass: recomputes the masked score, finishes softmax
# using the precomputed row max (in_ptr1) and denominator (in_ptr2), then
# zeroes entries where mask == 0 (the post-softmax masked_fill).
# in_out_ptr0 holds raw scores on entry and probabilities on exit.
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_mul_1(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)  # mask value
    tmp3 = tl.load(in_out_ptr0 + x2, xmask)  # raw score
    tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')  # row max
    tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')  # row sum
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = tmp3 * tmp0
    tmp5 = -9.999999843067494e+17
    tmp6 = tl.where(tmp2, tmp5, tmp4)
    tmp8 = tmp6 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp11 = tmp9 / tmp10
    tmp12 = tl.where(tmp2, tmp1, tmp11)  # masked positions get probability 0
    tl.store(in_out_ptr0 + x2, tmp12, xmask)
# Weighted sum of the memory values: out[b, c] = sum_j attn[b, j] * values[j, c]
# (the final `attn_score.unsqueeze(2) * memory_values` reduction, seq_len=4).
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4  # batch index b
    x0 = xindex % 4  # feature index c
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
    # Compiled GlobalAttention ('dot') forward.  args are consumed as
    # (memory_keys, query, memory_masks, memory_values) and the function
    # returns (attn_score, attn_memory).
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (4, 4), (4, 1))
    assert_size_stride(arg3_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # raw scores: bmm(memory_keys, query.unsqueeze(2))
        extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 1), (4,
            1, 1), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        get_raw_stream(0)
        # per-row max / sum-of-exp of the masked scores
        triton_poi_fused__softmax_eq_masked_fill_mul_0[grid(4)](arg2_1,
            buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
        del buf0
        # finish the softmax in place and zero masked positions
        triton_poi_fused__softmax_eq_masked_fill_mul_1[grid(16)](buf3,
            arg2_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg2_1
        del buf1
        del buf2
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # attn_memory = weighted sum of the values by the attention weights
        triton_poi_fused_mul_sum_2[grid(16)](buf3, arg3_1, buf4, 16, XBLOCK
            =16, num_warps=1, num_stages=1)
        del arg3_1
    return buf3, buf4
class GlobalAttentionNew(nn.Module):
    """Triton-compiled GlobalAttention ('dot' score path).

    Same constructor and parameters as GlobalAttention; `forward` feeds four
    tensors positionally into the fused CUDA graph instead of the eager ops.
    """

    def __init__(self, query_size, attn_size, attn_type='dot'):
        super(GlobalAttentionNew, self).__init__()
        self.query_size = query_size
        self.attn_size = attn_size
        self.attn_type = attn_type
        # parameters exist only for the score variants that need them
        if self.attn_type == 'general':
            self.linear_in = nn.Linear(query_size, attn_size, bias=False)
        elif self.attn_type == 'mlp':
            self.linear_query = nn.Linear(query_size, attn_size, bias=True)
            self.attn_w = nn.Linear(attn_size, 1, bias=False)
        elif self.attn_type == 'dot':
            assert self.query_size == self.attn_size

    def forward(self, input_0, input_1, input_2, input_3):
        """Return (attn_score, attn_memory) from the compiled graph."""
        # NOTE(review): `call` consumes [input_1, input_0, input_2, input_3];
        # input_2 appears to be the mask (the fused kernel compares it to 0) —
        # argument order differs from the eager module; confirm with callers.
        result = call([input_1, input_0, input_2, input_3])
        return result[0], result[1]
|
Roc-Ng/HANet
|
GlobalAttention
| false
| 8,702
|
[
"MIT"
] | 34
|
e679703e9e725205424d87f750358fb4f62ceec5
|
https://github.com/Roc-Ng/HANet/tree/e679703e9e725205424d87f750358fb4f62ceec5
|
ScoreLayer
|
import torch
from torchvision.transforms import functional as F
from torch.nn import functional as F
import torch.nn as nn
class ScoreLayer(nn.Module):
    """1x1 convolution producing a single-channel score map, with optional resize."""

    def __init__(self, k):
        super(ScoreLayer, self).__init__()
        # k input channels -> 1 output channel, kernel 1, stride 1
        self.score = nn.Conv2d(k, 1, 1, 1)

    def forward(self, x, x_size=None):
        """Score `x`; if `x_size` is given, bilinearly upsample to its spatial dims."""
        scores = self.score(x)
        if x_size is None:
            return scores
        return F.interpolate(scores, x_size[2:], mode='bilinear',
            align_corners=True)
def get_inputs():
    """One random example input of shape (4, 4, 4, 4)."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments for ScoreLayer: no positionals, k=4."""
    return [[], dict(k=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
# Adds the single scalar conv bias (in_ptr0[0]) to every element of the
# one-channel convolution output, in place.
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)  # the scalar bias
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
    # Compiled ScoreLayer forward: extern 1x1 convolution followed by a fused
    # bias add.  args = (weight, bias, input); returns (output, weight, input)
    # — the extra tensors are kept for the backward pass.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # add the bias in place on the conv output
        triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3
class ScoreLayerNew(nn.Module):
    """Triton-compiled ScoreLayer (no resize path): 1x1 conv to one channel."""

    def __init__(self, k):
        super(ScoreLayerNew, self).__init__()
        self.score = nn.Conv2d(k, 1, 1, 1)

    def forward(self, input_0):
        """Run the compiled graph; `call` expects (weight, bias, input)."""
        result = call([self.score.weight, self.score.bias, input_0])
        return result[0]
|
Res2Net/Res2Net-PoolNet
|
ScoreLayer
| false
| 8,703
|
[
"MIT"
] | 35
|
7bef0652e83a6c4ebe4ed47f1b03ab5b7b16074a
|
https://github.com/Res2Net/Res2Net-PoolNet/tree/7bef0652e83a6c4ebe4ed47f1b03ab5b7b16074a
|
ISAB
|
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class MAB(nn.Module):
    """Multihead Attention Block: queries derived from X attend over keys/values derived from Y."""

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        # projections: queries from X, keys/values from Y, plus the output projection
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        # optional layer norm / dropout collapse to no-ops when disabled
        self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.dropout1 = nn.Identity() if p is None else nn.Dropout(p=p)
        self.dropout2 = nn.Identity() if p is None else nn.Dropout(p=p)

    def forward(self, X, Y, mask=None):
        """Attend X over Y; `mask` (optional, bool) marks key positions to ignore."""
        queries = self.fc_q(X)
        keys = self.fc_k(Y)
        values = self.fc_v(Y)

        def to_heads(t):
            # split the feature dim into heads and stack the heads along dim 0
            return torch.cat(t.chunk(self.num_heads, -1), 0)

        q_h, k_h, v_h = to_heads(queries), to_heads(keys), to_heads(values)
        # NOTE: scaled by sqrt of the *full* feature dim, not the per-head dim
        scores = q_h @ k_h.transpose(-2, -1) / math.sqrt(queries.shape[-1])
        if mask is None:
            A = torch.softmax(scores, -1)
        else:
            # repeat the key mask across query positions and heads
            full_mask = torch.cat(
                [torch.stack([mask] * queries.shape[-2], -2)] * self.num_heads, 0)
            scores.masked_fill_(full_mask, -float('inf'))
            A = torch.softmax(scores, -1)
            # rows whose keys were all masked produce NaNs -> force to zero
            A.masked_fill_(torch.isnan(A), 0.0)
        # merge the heads back into the feature dimension
        attn = torch.cat((A @ v_h).chunk(self.num_heads, 0), -1)
        O = self.ln1(queries + self.dropout1(attn))
        return self.ln2(O + self.dropout2(F.relu(self.fc_o(O))))
class PMA(nn.Module):
    """Pooling by Multihead Attention: learned inducing points attend over X."""

    def __init__(self, dim_X, dim, num_inds, **kwargs):
        super().__init__()
        self.I = nn.Parameter(torch.Tensor(num_inds, dim))
        nn.init.xavier_uniform_(self.I)
        self.mab = MAB(dim, dim_X, dim, **kwargs)

    def forward(self, X, mask=None):
        """Pool X down to num_inds rows (batched when X has a batch dim)."""
        # broadcast the inducing points over the batch for 3-D inputs
        inducing = self.I if X.dim() == 2 else self.I.repeat(X.shape[0], 1, 1)
        return self.mab(inducing, X, mask=mask)
class ISAB(nn.Module):
    """Induced Set Attention Block: pool X via PMA, then attend X over the summary."""

    def __init__(self, dim_X, dim, num_inds, **kwargs):
        super().__init__()
        self.pma = PMA(dim_X, dim, num_inds, **kwargs)
        self.mab = MAB(dim_X, dim, dim, **kwargs)

    def forward(self, X, mask=None):
        summary = self.pma(X, mask=mask)
        return self.mab(X, summary)
def get_inputs():
    """One random example input of shape (4, 4)."""
    return [torch.rand(4, 4)]
def get_init_inputs():
    """Constructor arguments for ISAB: no positionals, three keywords."""
    return [[], dict(dim_X=4, dim=4, num_inds=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Implements torch.cat(x.chunk(4, -1), 0) for a (4, 4) input written as (16, 1):
# the four feature-dim chunks (columns) are stacked along dim 0, i.e. a
# column-major flatten of the input matrix.
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # first chunk: column 0
    tmp5 = tl.load(in_ptr0 + 4 * x0, tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # second chunk: column 1
    tmp10 = tl.load(in_ptr0 + (1 + 4 * (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13  # third chunk: column 2
    tmp15 = tl.load(in_ptr0 + (2 + 4 * (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    # fourth chunk: column 3
    tmp19 = tl.load(in_ptr0 + (3 + 4 * (-12 + x0)), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x0, tmp22, xmask)
# Numerically stable row softmax of the 16x16 logits with the attention
# temperature 0.5 (= 1/sqrt(4)) folded in: out = exp(0.5*(x - max)) / sum,
# which equals softmax(0.5 * x) per row.
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]  # row max
    tmp7 = tmp2 - tmp6
    tmp8 = 0.5  # 1 / sqrt(dim) scaling
    tmp9 = tmp7 * tmp8
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]  # row denominator
    tmp15 = tmp10 / tmp14
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
# In-place residual add: in_out_ptr0 (4x4) += heads-merged attention output,
# where in_ptr0 is the (16, 1) stacked-heads result and the merge is
# torch.cat(chunks_along_dim0, -1), i.e. out[x1, x0] += in_ptr0[4*x0 + x1].
@triton.jit
def triton_poi_fused_add_cat_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature (head) index
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = x0
    tl.full([1], 0, tl.int64)
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp1 < tmp4
    tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp1 >= tmp4
    tmp8 = tl.full([1], 2, tl.int64)
    tmp9 = tmp1 < tmp8
    tmp10 = tmp7 & tmp9
    tmp11 = tl.load(in_ptr0 + (4 + x1), tmp10 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp1 >= tmp8
    tmp13 = tl.full([1], 3, tl.int64)
    tmp14 = tmp1 < tmp13
    tmp15 = tmp12 & tmp14
    tmp16 = tl.load(in_ptr0 + (8 + x1), tmp15 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp17 = tmp1 >= tmp13
    tl.full([1], 4, tl.int64)
    tmp20 = tl.load(in_ptr0 + (12 + x1), tmp17 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp21 = tl.where(tmp15, tmp16, tmp20)
    tmp22 = tl.where(tmp10, tmp11, tmp21)
    tmp23 = tl.where(tmp5, tmp6, tmp22)
    tmp24 = tmp0 + tmp23  # residual connection
    tl.store(in_out_ptr0 + x2, tmp24, xmask)
# Fused residual + relu tail: out_ptr0 = in_ptr0 + relu(in_ptr1 + bias);
# out_ptr1 records (relu input <= 0), the boolean mask inductor saves for
# the backward pass of the relu.
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index for the bias broadcast
    tmp0 = tl.load(in_ptr0 + x2, xmask)  # residual term
    tmp1 = tl.load(in_ptr1 + x2, xmask)  # linear output, pre-bias
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')  # bias
    tmp3 = tmp1 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)  # relu
    tmp6 = tmp0 + tmp5
    tmp7 = 0.0
    tmp8 = tmp5 <= tmp7
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
    # Compiled ISAB forward: two chained fused attention blocks (the PMA's
    # inner MAB, then the outer MAB).  primals_1..16 and primals_18 are module
    # parameters; primals_17 is consumed as a (4, 4) weight-shaped tensor in
    # the second block's final matmul (see the exporter's positional layout in
    # ISABNew.forward).  Returns the block output plus saved-for-backward
    # tensors.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4, 4), (4, 1))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4, 4), (4, 1))
    assert_size_stride(primals_18, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # ---- first attention block: three bias+matmul projections ----
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_4, primals_2, reinterpret_tensor(
            primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_4
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, primals_1, reinterpret_tensor(
            primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, primals_1, reinterpret_tensor(
            primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_7
        del primals_8
        # split each projection into heads (column-major flatten to (16, 1))
        buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(16)](buf0, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        triton_poi_fused_cat_0[grid(16)](buf2, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = reinterpret_tensor(buf2, (16, 1), (1, 1), 0)
        del buf2
        triton_poi_fused_cat_0[grid(16)](buf1, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # attention logits and scaled softmax
        buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(buf5, (1, 16), (0, 1), 0
            ), out=buf6)
        buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(16)](buf6, buf9, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        # weighted values, residual add, then linear + relu residual tail
        buf10 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0)
        del buf1
        extern_kernels.mm(buf9, buf4, out=buf10)
        buf11 = buf0
        del buf0
        triton_poi_fused_add_cat_2[grid(16)](buf11, buf10, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
        del buf10
        extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1,
            4), 0), out=buf12)
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf29 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf11,
            buf12, primals_10, buf13, buf29, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del primals_10
        # ---- second attention block, fed by buf13 (first block's output) ----
        buf14 = buf12
        del buf12
        extern_kernels.addmm(primals_12, primals_1, reinterpret_tensor(
            primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14)
        del primals_11
        del primals_12
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_14, buf13, reinterpret_tensor(
            primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
        del primals_14
        buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_16, buf13, reinterpret_tensor(
            primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16)
        del primals_16
        buf17 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        triton_poi_fused_cat_0[grid(16)](buf14, buf17, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        triton_poi_fused_cat_0[grid(16)](buf16, buf18, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf19 = reinterpret_tensor(buf16, (16, 1), (1, 1), 0)
        del buf16
        triton_poi_fused_cat_0[grid(16)](buf15, buf19, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf20 = buf6
        del buf6
        extern_kernels.mm(buf17, reinterpret_tensor(buf19, (1, 16), (0, 1),
            0), out=buf20)
        buf23 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(16)](buf20, buf23, 16, 16, XBLOCK=
            8, num_warps=2, num_stages=1)
        del buf20
        buf24 = reinterpret_tensor(buf15, (16, 1), (1, 1), 0)
        del buf15
        extern_kernels.mm(buf23, buf18, out=buf24)
        buf25 = buf14
        del buf14
        triton_poi_fused_add_cat_2[grid(16)](buf25, buf24, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf26 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0)
        del buf24
        extern_kernels.mm(buf25, reinterpret_tensor(primals_17, (4, 4), (1,
            4), 0), out=buf26)
        buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf25,
            buf26, primals_18, buf27, buf28, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf26
        del primals_18
    # buf27 is the module output; the rest are kept for backward
    return (buf27, primals_1, primals_2, buf3, buf9, buf11, buf13, buf17,
        buf23, buf25, buf28, primals_17, reinterpret_tensor(buf18, (1, 16),
        (1, 1), 0), buf19, primals_15, primals_13, buf29, primals_9,
        reinterpret_tensor(buf4, (1, 16), (1, 1), 0), buf5, primals_3)
class MAB(nn.Module):
    """Multihead Attention Block: queries derived from X attend over keys/values derived from Y."""

    def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None):
        super().__init__()
        self.num_heads = num_heads
        # projections: queries from X, keys/values from Y, plus the output projection
        self.fc_q = nn.Linear(dim_X, dim)
        self.fc_k = nn.Linear(dim_Y, dim)
        self.fc_v = nn.Linear(dim_Y, dim)
        self.fc_o = nn.Linear(dim, dim)
        # optional layer norm / dropout collapse to no-ops when disabled
        self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity()
        self.dropout1 = nn.Identity() if p is None else nn.Dropout(p=p)
        self.dropout2 = nn.Identity() if p is None else nn.Dropout(p=p)

    def forward(self, X, Y, mask=None):
        """Attend X over Y; `mask` (optional, bool) marks key positions to ignore."""
        queries = self.fc_q(X)
        keys = self.fc_k(Y)
        values = self.fc_v(Y)

        def to_heads(t):
            # split the feature dim into heads and stack the heads along dim 0
            return torch.cat(t.chunk(self.num_heads, -1), 0)

        q_h, k_h, v_h = to_heads(queries), to_heads(keys), to_heads(values)
        # NOTE: scaled by sqrt of the *full* feature dim, not the per-head dim
        scores = q_h @ k_h.transpose(-2, -1) / math.sqrt(queries.shape[-1])
        if mask is None:
            A = torch.softmax(scores, -1)
        else:
            # repeat the key mask across query positions and heads
            full_mask = torch.cat(
                [torch.stack([mask] * queries.shape[-2], -2)] * self.num_heads, 0)
            scores.masked_fill_(full_mask, -float('inf'))
            A = torch.softmax(scores, -1)
            # rows whose keys were all masked produce NaNs -> force to zero
            A.masked_fill_(torch.isnan(A), 0.0)
        # merge the heads back into the feature dimension
        attn = torch.cat((A @ v_h).chunk(self.num_heads, 0), -1)
        O = self.ln1(queries + self.dropout1(attn))
        return self.ln2(O + self.dropout2(F.relu(self.fc_o(O))))
class PMA(nn.Module):
    """Pooling by Multihead Attention: learned inducing points attend over X."""

    def __init__(self, dim_X, dim, num_inds, **kwargs):
        super().__init__()
        self.I = nn.Parameter(torch.Tensor(num_inds, dim))
        nn.init.xavier_uniform_(self.I)
        self.mab = MAB(dim, dim_X, dim, **kwargs)

    def forward(self, X, mask=None):
        """Pool X down to num_inds rows (batched when X has a batch dim)."""
        # broadcast the inducing points over the batch for 3-D inputs
        inducing = self.I if X.dim() == 2 else self.I.repeat(X.shape[0], 1, 1)
        return self.mab(inducing, X, mask=mask)
class ISABNew(nn.Module):
    """Triton-compiled ISAB: same parameters as ISAB, forward runs the fused `call` graph."""

    def __init__(self, dim_X, dim, num_inds, **kwargs):
        super().__init__()
        self.pma = PMA(dim_X, dim, num_inds, **kwargs)
        self.mab = MAB(dim_X, dim, dim, **kwargs)

    def forward(self, input_0):
        """Return the block output from the compiled graph."""
        inner, outer = self.pma.mab, self.mab
        # positional layout required by `call` (primals_1 .. primals_18)
        args = [self.pma.I,
                inner.fc_q.weight, inner.fc_k.weight, inner.fc_q.bias,
                inner.fc_v.weight, inner.fc_k.bias, inner.fc_o.weight,
                inner.fc_v.bias, outer.fc_q.weight, inner.fc_o.bias,
                outer.fc_k.weight, outer.fc_q.bias, outer.fc_v.weight,
                outer.fc_k.bias, outer.fc_o.weight, outer.fc_v.bias,
                input_0, outer.fc_o.bias]
        return call(args)[0]
|
OpenXAIProject/dac
|
ISAB
| false
| 8,704
|
[
"MIT"
] | 17
|
652776e21b56dcb68839363bb077d5c5ea28d81e
|
https://github.com/OpenXAIProject/dac/tree/652776e21b56dcb68839363bb077d5c5ea28d81e
|
ExponentialDecay
|
import torch
from torch import Tensor
from torch import nn
from torch.jit import Final
class ExponentialUpdate(nn.Module):
    """Single exponential-moving-average step: alpha weights the carried state."""
    # NOTE(review): annotated Final[int] but __init__ stores a float — likely meant Final[float]
    alpha: 'Final[int]'

    def __init__(self, alpha: 'float'):
        super().__init__()
        self.alpha = float(alpha)

    def forward(self, x: 'Tensor', state: 'Tensor') ->Tensor:
        """Return (1 - alpha) * x + alpha * state."""
        keep = 1 - self.alpha
        return x * keep + state * self.alpha
class ExponentialDecay(nn.Module):
    """Apply ExponentialUpdate recurrently along the leading (time) dimension."""

    def __init__(self, alpha: 'float'):
        super().__init__()
        self.update_rule = ExponentialUpdate(alpha)

    # NOTE(review): 'Optional' is not imported in this file; the string
    # annotation is never evaluated at runtime, but tooling may choke on it.
    def forward(self, x: 'Tensor', state: 'Optional[Tensor]'=None):
        """Return (out, final_state); state defaults to the first time step of x."""
        out = torch.empty_like(x)
        if state is None:
            state = x[0]  # seed the recurrence with the first frame
        for step, frame in enumerate(x):
            state = self.update_rule(frame, state)
            out[step] = state
        return out, state
def get_inputs():
    """One random example input of shape (4, 4, 4, 4)."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments for ExponentialDecay: no positionals, alpha=4."""
    return [[], dict(alpha=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import Tensor
from torch import nn
from torch.jit import Final
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Unrolled 4-step exponential-update recurrence (alpha=4, so 1-alpha=-3) over
# the leading time dim of a (4, 4, 4, 4) input; emits only the FINAL state:
# state_t = x[t]*(1-a) + a*state_{t-1}, with state seeded as x[0].
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (192 + x0), xmask)  # x[3]
    tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)  # x[2]
    tmp5 = tl.load(in_ptr0 + (64 + x0), xmask)  # x[1]
    tmp7 = tl.load(in_ptr0 + x0, xmask)  # x[0]
    tmp1 = -3.0  # 1 - alpha
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = 4.0  # alpha
    tmp10 = tmp7 * tmp9
    tmp11 = tmp8 + tmp10  # state_0 (= x[0] since the seed is x[0])
    tmp12 = tmp11 * tmp9
    tmp13 = tmp6 + tmp12  # state_1
    tmp14 = tmp13 * tmp9
    tmp15 = tmp4 + tmp14  # state_2
    tmp16 = tmp15 * tmp9
    tmp17 = tmp2 + tmp16  # state_3 (final state)
    tl.store(out_ptr0 + x0, tmp17, xmask)
# Writes out[t] = state_t for t = 0..3: recomputes states 0..2 from the input
# (in_ptr1), takes the final state from in_ptr0, and selects by time index x1.
# in_ptr2 is an uninitialized scratch buffer used only as the never-selected
# fallback of the where-chain (every x1 in 0..3 matches one branch).
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 64  # time step t
    x0 = xindex % 64  # position within one frame
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr2 + x2, xmask)  # scratch fallback, never selected
    tmp0 = x1
    tmp1 = tl.full([1], 3, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl.full([1], 2, tl.int32)
    tmp5 = tmp0 == tmp4
    tmp7 = -3.0  # 1 - alpha
    tmp8 = tmp6 * tmp7
    tmp10 = tmp9 * tmp7
    tmp12 = tmp11 * tmp7
    tmp13 = 4.0  # alpha
    tmp14 = tmp11 * tmp13
    tmp15 = tmp12 + tmp14  # state_0
    tmp16 = tmp15 * tmp13
    tmp17 = tmp10 + tmp16  # state_1
    tmp18 = tmp17 * tmp13
    tmp19 = tmp8 + tmp18  # state_2
    tmp20 = tl.full([1], 1, tl.int32)
    tmp21 = tmp0 == tmp20
    tmp22 = tl.full([1], 0, tl.int32)
    tmp23 = tmp0 == tmp22
    tmp25 = tl.where(tmp23, tmp15, tmp24)
    tmp26 = tl.where(tmp21, tmp17, tmp25)
    tmp27 = tl.where(tmp5, tmp19, tmp26)
    tmp28 = tl.where(tmp2, tmp3, tmp27)
    tl.store(out_ptr0 + x2, tmp28, xmask)
def call(args):
    # Entry point for the inductor-compiled ExponentialDecay graph.
    # Returns (buf2, buf1): buf2 holds all per-step scan states, buf1 the
    # final state only.  Expects a single contiguous (4, 4, 4, 4) CUDA tensor.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    # Scratch buffer: only read by kernel 1 as a never-taken select default,
    # then freed.  Left uninitialized on purpose by the code generator.
    buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Kernel 0: scan over the leading dim, writing the final state.
        triton_poi_fused_add_mul_0[grid(64)](arg0_1, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Kernel 1: recompute/stack every per-step state into buf2.
        triton_poi_fused_add_mul_1[grid(256)](buf1, arg0_1, buf0, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    del arg0_1
    del buf0
    return buf2, buf1
class ExponentialUpdate(nn.Module):
    """Exponential moving-average step: out = (1 - alpha) * x + alpha * state."""
    alpha: 'Final[int]'

    def __init__(self, alpha: 'float'):
        super().__init__()
        # Stored as a plain float; the Final annotation above marks it as a
        # constant for scripting/compilation.
        self.alpha = float(alpha)

    def forward(self, x: 'Tensor', state: 'Tensor') ->Tensor:
        """Blend the new input *x* with the running *state*."""
        decay = self.alpha
        return x * (1 - decay) + state * decay
class ExponentialDecayNew(nn.Module):
    """Inductor-compiled exponential decay.

    Runs the fused Triton kernels via `call` instead of iterating
    ExponentialUpdate step by step in eager mode.
    """

    def __init__(self, alpha: 'float'):
        super().__init__()
        # Kept for interface parity with the eager module; the compiled
        # kernels have their constants baked in and do not read this value.
        self.update_rule = ExponentialUpdate(alpha)

    def forward(self, input_0):
        # `call` asserts a contiguous (4, 4, 4, 4) CUDA tensor.
        arg0_1 = input_0
        output = call([arg0_1])
        # output[0]: per-step states; output[1]: final state only
        # (presumably matching the eager module's two return values — verify).
        return output[0], output[1]
|
Rikorose/clc-dns-challenge-2020
|
ExponentialDecay
| false
| 8,705
|
[
"Apache-2.0"
] | 12
|
4f1c078691327a75b3a338fe372ba356b450a6da
|
https://github.com/Rikorose/clc-dns-challenge-2020/tree/4f1c078691327a75b3a338fe372ba356b450a6da
|
LayerNorm
|
import torch
from typing import Callable
from typing import Tuple
import torch.utils.data
from typing import Union
import torch.nn
import torch.cuda
import torch.backends.cudnn
def batch_elementwise(input: 'torch.Tensor', param: 'torch.Tensor', op:
    'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', input_batch_dim:
    'int'=0, pndim: 'int'=1) ->torch.Tensor:
    """
    Do elementwise operation in groups.

    :param input: input, any shape, [..., Ci, Cj, ...]
    :param param: the parameter, shape [N, Ci, Cj....], in which case B % N == 0, or [Ci, Cj....]
    :param input_batch_dim: which dimension is the batch in the input
    :param op: the operation to perform
    :param pndim: number of parameter dimensions without batch
    :return: input with the op performed, the same shape as input
    """
    # A leading batch dim of size 1 is dropped so it takes the simple path.
    if param.ndim == pndim + 1:
        param = param.squeeze(0)
    if param.ndim == pndim:
        # Unbatched parameter: ordinary broadcasting is sufficient.
        return op(input, param)
    assert param.ndim == pndim + 1
    groups = param.shape[0]
    assert input.shape[input_batch_dim] % groups == 0
    lead = input.shape[:input_batch_dim]
    trail = input.shape[input_batch_dim + 1:]
    # Split the batch dim into [groups, batch // groups].
    grouped = input.view(*lead, groups, -1, *trail)
    # Pad param with singleton dims so it broadcasts against `grouped`.
    pad = grouped.ndim - input_batch_dim - param.ndim
    shaped = param.view(*([1] * input_batch_dim), groups, *([1] * pad),
        *param.shape[1:])
    return op(grouped, shaped).view_as(input)
def batch_bias_add(*args, **kwargs) ->torch.Tensor:
    """
    Batch add bias to the inputs.

    For more details, see batch_elementwise
    """
    def _add(a, b):
        return a + b
    return batch_elementwise(*args, op=_add, **kwargs)
def batch_const_mul(*args, **kwargs) ->torch.Tensor:
    """
    Batch multiplies bias to the inputs.

    For more details, see batch_elementwise
    """
    def _mul(a, b):
        return a * b
    return batch_elementwise(*args, op=_mul, **kwargs)
class MaskedModule(torch.nn.Module):
    """Marker base class for modules that participate in masking.

    NOTE(review): adds no behavior here; masking semantics are presumably
    defined elsewhere in the project.
    """
    pass
class LayerNorm(MaskedModule):
    """Layer normalization over the last dimension with learnable affine.

    NOTE(review): unlike torch.nn.LayerNorm, this uses the unbiased std
    (torch.std default) and adds eps to the std rather than the variance —
    preserved as-is because downstream code may rely on it.
    """

    def __init__(self, normalized_shape: 'Union[int, Tuple[int]]', eps=1e-05):
        super().__init__()
        # Accept a bare int as shorthand for a 1-tuple.
        if isinstance(normalized_shape, int):
            normalized_shape = normalized_shape,
        self.gamma = torch.nn.Parameter(torch.ones(*normalized_shape))
        self.beta = torch.nn.Parameter(torch.zeros(*normalized_shape))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normed = (x - mu) / (sigma + self.eps)
        scaled = batch_const_mul(normed, self.gamma)
        return batch_bias_add(scaled, self.beta)
def get_inputs():
    """Return an example forward-input batch: one random (4, 4, 4, 4) tensor."""
    example = torch.rand(4, 4, 4, 4)
    return [example]
def get_init_inputs():
    """Return [positional args, keyword args] for constructing the module."""
    init_args = []
    init_kwargs = {'normalized_shape': 4}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from typing import Callable
from typing import Tuple
import torch.utils.data
from typing import Union
import torch.nn
import torch.cuda
import torch.backends.cudnn
# Inductor runtime helpers: shape/stride guard assertion and raw strided
# CUDA allocation for intermediate buffers.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused LayerNorm over the last dim (size 4) of a contiguous
    # (4, 4, 4, 4) input:
    #   out = (x - mean) / (std + eps) * gamma + beta
    # Matches the eager module exactly: std is the UNBIASED estimate
    # (sum of squared deviations / 3) and eps is added to the std itself,
    # not to the variance.
    #   in_ptr0: input x, in_ptr1: gamma (4,), in_ptr2: beta (4,)
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Reload the full 4-element row this lane belongs to for the reduction.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    # tmp9 = row mean.
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tmp11 = tmp1 - tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp2 - tmp9
    tmp14 = tmp13 * tmp13
    tmp15 = tmp12 + tmp14
    tmp16 = tmp4 - tmp9
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp6 - tmp9
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    # Divide by N - 1 = 3: unbiased variance, then std.
    tmp22 = 3.0
    tmp23 = tmp21 / tmp22
    tmp24 = libdevice.sqrt(tmp23)
    # eps = 1e-05, added to the std (not the variance).
    tmp25 = 1e-05
    tmp26 = tmp24 + tmp25
    tmp27 = tmp10 / tmp26
    # Affine: * gamma + beta.
    tmp29 = tmp27 * tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
    # Entry point for the inductor-compiled LayerNorm graph.
    # args: [input (4, 4, 4, 4), gamma (4,), beta (4,)], all CUDA tensors.
    # Returns (normalized output, input) — the input is kept for backward.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Single fused kernel computes mean/std/normalize/affine in one pass.
        triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_1,
            primals_2, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
    del primals_2
    del primals_3
    return buf0, primals_1
def batch_elementwise(input: 'torch.Tensor', param: 'torch.Tensor', op:
    'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', input_batch_dim:
    'int'=0, pndim: 'int'=1) ->torch.Tensor:
    """
    Do elementwise operation in groups.

    :param input: input, any shape, [..., Ci, Cj, ...]
    :param param: the parameter, shape [N, Ci, Cj....], in which case B % N == 0, or [Ci, Cj....]
    :param input_batch_dim: which dimension is the batch in the input
    :param op: the operation to perform
    :param pndim: number of parameter dimensions without batch
    :return: input with the op performed, the same shape as input
    """
    # Drop a size-1 leading batch dim so it falls through to the simple case.
    if param.ndim == pndim + 1:
        param = param.squeeze(0)
    if param.ndim == pndim:
        # No per-group batching: rely on plain broadcasting.
        return op(input, param)
    assert param.ndim == pndim + 1
    n_groups = param.shape[0]
    assert input.shape[input_batch_dim] % n_groups == 0
    before = input.shape[:input_batch_dim]
    after = input.shape[input_batch_dim + 1:]
    # Reshape the batch dim into [n_groups, batch // n_groups].
    input_r = input.view(*before, n_groups, -1, *after)
    # Insert singleton dims so param lines up with input_r for broadcasting.
    n_fill = input_r.ndim - input_batch_dim - param.ndim
    param_r = param.view(*([1] * input_batch_dim), n_groups,
        *([1] * n_fill), *param.shape[1:])
    return op(input_r, param_r).view_as(input)
def batch_bias_add(*args, **kwargs) ->torch.Tensor:
    """
    Batch add bias to the inputs.

    For more details, see batch_elementwise
    """
    def _elementwise_add(a, b):
        return a + b
    return batch_elementwise(*args, op=_elementwise_add, **kwargs)
def batch_const_mul(*args, **kwargs) ->torch.Tensor:
    """
    Batch multiplies bias to the inputs.

    For more details, see batch_elementwise
    """
    def _elementwise_mul(a, b):
        return a * b
    return batch_elementwise(*args, op=_elementwise_mul, **kwargs)
class MaskedModule(torch.nn.Module):
    """Marker base class for modules that participate in masking.

    NOTE(review): adds no behavior here; masking semantics are presumably
    defined elsewhere in the project.
    """
    pass
class LayerNormNew(MaskedModule):
    """Inductor-compiled LayerNorm: runs the fused Triton kernel via `call`
    instead of the eager mean/std/affine sequence.

    NOTE(review): the compiled path assumes a contiguous (4, 4, 4, 4) CUDA
    input and normalized_shape == 4 (asserted inside `call`).
    """

    def __init__(self, normalized_shape: 'Union[int, Tuple[int]]', eps=1e-05):
        super().__init__()
        # Accept a bare int as shorthand for a 1-tuple.
        if isinstance(normalized_shape, int):
            normalized_shape = normalized_shape,
        self.gamma = torch.nn.Parameter(torch.ones(*normalized_shape))
        self.beta = torch.nn.Parameter(torch.zeros(*normalized_shape))
        # NOTE(review): eps is stored but unused here — the compiled kernel
        # bakes 1e-05 in; a non-default eps would be silently ignored.
        self.eps = eps

    def forward(self, input_0):
        primals_2 = self.gamma
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        # call returns (normalized output, saved input); expose only the output.
        return output[0]
|
RobertCsordas/modules
|
LayerNorm
| false
| 8,706
|
[
"BSD-3-Clause"
] | 22
|
efdb8790b074862581e035c9ab5bf889440a8023
|
https://github.com/RobertCsordas/modules/tree/efdb8790b074862581e035c9ab5bf889440a8023
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.