| entry_point (string, 1–65 chars) | original_triton_python_code (string, 208–619k chars) | optimised_triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (list, 1–6 entries) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
CharbonnierLoss
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=0.001):
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, x, y):
diff = x - y
loss = torch.sum(torch.sqrt(diff * diff + self.eps))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 0.001
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_sqrt_sub_sum_0[grid(1)](arg0_1, arg1_1,
buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class CharbonnierLossNew(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=0.001):
super(CharbonnierLossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
IceClear/MW-GAN
|
CharbonnierLoss
| false
| 8,288
|
[
"MIT"
] | 36
|
acb962468c984681c4a21f7b5c14588ca8f58c00
|
https://github.com/IceClear/MW-GAN/tree/acb962468c984681c4a21f7b5c14588ca8f58c00
|
Transform
|
import torch
import torch.nn as nn
def calc_mean_std(feat, eps=1e-05):
size = feat.size()
assert len(size) == 4
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def mean_variance_norm(feat):
size = feat.size()
mean, std = calc_mean_std(feat)
normalized_feat = (feat - mean.expand(size)) / std.expand(size)
return normalized_feat
class SANet(nn.Module):
def __init__(self, in_planes):
super(SANet, self).__init__()
self.f = nn.Conv2d(in_planes, in_planes, (1, 1))
self.g = nn.Conv2d(in_planes, in_planes, (1, 1))
self.h = nn.Conv2d(in_planes, in_planes, (1, 1))
self.sm = nn.Softmax(dim=-1)
self.out_conv = nn.Conv2d(in_planes, in_planes, (1, 1))
def forward(self, content, style):
F = self.f(mean_variance_norm(content))
G = self.g(mean_variance_norm(style))
H = self.h(style)
b, c, h, w = F.size()
F = F.view(b, -1, w * h).permute(0, 2, 1)
b, c, h, w = G.size()
G = G.view(b, -1, w * h)
S = torch.bmm(F, G)
S = self.sm(S)
b, c, h, w = H.size()
H = H.view(b, -1, w * h)
O = torch.bmm(H, S.permute(0, 2, 1))
b, c, h, w = content.size()
O = O.view(b, c, h, w)
O = self.out_conv(O)
O += content
return O
class Transform(nn.Module):
def __init__(self, in_planes):
super(Transform, self).__init__()
self.sanet4_1 = SANet(in_planes=in_planes)
self.sanet5_1 = SANet(in_planes=in_planes)
self.merge_conv_pad = nn.ReflectionPad2d((1, 1, 1, 1))
self.merge_conv = nn.Conv2d(in_planes, in_planes, (3, 3))
def forward(self, content4_1, style4_1, content5_1, style5_1):
self.upsample5_1 = nn.Upsample(size=(content4_1.size()[2],
content4_1.size()[3]), mode='nearest')
return self.merge_conv(self.merge_conv_pad(self.sanet4_1(content4_1,
style4_1) + self.upsample5_1(self.sanet5_1(content5_1, style5_1))))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mean_sub_var_0(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp18 = tl.sum(tmp3, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = tmp0 - tmp20
tmp22 = 15.0
tmp23 = tmp16 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = tmp21 / tmp26
tl.store(out_ptr2 + (r1 + 16 * x0), tmp27, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_arange_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_reflection_pad2d_5(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (3 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x1
))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (3 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tl.full([XBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr4 + (tmp13 + 4 * tmp9 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp16 = tmp14 + tmp15
tmp17 = tl.load(in_ptr6 + (tmp13 + 4 * tmp9 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp18 = tmp16 + tmp17
tmp19 = tmp4 + tmp18
tl.store(out_ptr0 + x7, tmp19, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_15, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_22, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mean_sub_var_0[grid(16)](primals_1, buf4, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_div_mean_sub_var_0[grid(16)](primals_4, buf10, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
buf11 = extern_kernels.convolution(buf10, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = extern_kernels.convolution(primals_4, primals_7, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = buf5
del buf5
triton_poi_fused_convolution_1[grid(256)](buf13, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf14 = buf11
del buf11
triton_poi_fused_convolution_1[grid(256)](buf14, primals_6, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf15 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf13, (4, 16, 4), (64, 1, 16
), 0), reinterpret_tensor(buf14, (4, 4, 16), (64, 16, 1), 0),
out=buf15)
buf18 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_2[grid(64)](buf15, buf18, 64, 16, XBLOCK=
32, num_warps=4, num_stages=1)
buf19 = buf12
del buf12
triton_poi_fused_convolution_1[grid(256)](buf19, primals_8, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf20 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf19, (4, 4, 16), (64, 16, 1
), 0), reinterpret_tensor(buf18, (4, 16, 16), (256, 1, 16), 0),
out=buf20)
buf21 = extern_kernels.convolution(reinterpret_tensor(buf20, (4, 4,
4, 4), (64, 16, 4, 1), 0), primals_9, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1))
buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_div_mean_sub_var_0[grid(16)](primals_11, buf26, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1))
buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_div_mean_sub_var_0[grid(16)](primals_14, buf32, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
buf33 = extern_kernels.convolution(buf32, primals_15, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 4, 4, 4), (64, 16, 4, 1))
buf34 = extern_kernels.convolution(primals_14, primals_17, stride=(
1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 4, 4, 4), (64, 16, 4, 1))
buf35 = buf27
del buf27
triton_poi_fused_convolution_1[grid(256)](buf35, primals_13, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf36 = buf33
del buf33
triton_poi_fused_convolution_1[grid(256)](buf36, primals_16, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_16
buf37 = buf15
del buf15
extern_kernels.bmm(reinterpret_tensor(buf35, (4, 16, 4), (64, 1, 16
), 0), reinterpret_tensor(buf36, (4, 4, 16), (64, 16, 1), 0),
out=buf37)
buf40 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_2[grid(64)](buf37, buf40, 64, 16, XBLOCK=
32, num_warps=4, num_stages=1)
del buf37
buf41 = buf34
del buf34
triton_poi_fused_convolution_1[grid(256)](buf41, primals_18, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_18
buf42 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf41, (4, 4, 16), (64, 16, 1
), 0), reinterpret_tensor(buf40, (4, 16, 16), (256, 1, 16), 0),
out=buf42)
buf43 = extern_kernels.convolution(reinterpret_tensor(buf42, (4, 4,
4, 4), (64, 16, 4, 1), 0), primals_19, stride=(1, 1), padding=(
0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf43, (4, 4, 4, 4), (64, 16, 4, 1))
buf44 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_arange_3[grid(4)](buf44, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf45 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(4)](buf45, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf46 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32
)
triton_poi_fused__unsafe_index_add_convolution_reflection_pad2d_5[grid
(576)](buf21, primals_10, primals_1, buf45, buf43, primals_20,
primals_11, buf46, 576, XBLOCK=256, num_warps=4, num_stages=1)
del buf21
del buf43
del primals_1
del primals_10
del primals_11
del primals_20
buf47 = extern_kernels.convolution(buf46, primals_21, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 4, 4, 4), (64, 16, 4, 1))
buf48 = buf47
del buf47
triton_poi_fused_convolution_1[grid(256)](buf48, primals_22, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_22
return (buf48, primals_2, primals_4, primals_5, primals_7, primals_9,
primals_12, primals_14, primals_15, primals_17, primals_19,
primals_21, buf4, buf10, buf18, reinterpret_tensor(buf20, (4, 4, 4,
4), (64, 16, 4, 1), 0), buf26, buf32, buf40, reinterpret_tensor(
buf42, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf44, buf45, buf46,
reinterpret_tensor(buf41, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf35, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf36, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf19, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf13, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf14, (4, 16, 4), (64, 1, 16), 0))
def calc_mean_std(feat, eps=1e-05):
size = feat.size()
assert len(size) == 4
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def mean_variance_norm(feat):
size = feat.size()
mean, std = calc_mean_std(feat)
normalized_feat = (feat - mean.expand(size)) / std.expand(size)
return normalized_feat
class SANet(nn.Module):
def __init__(self, in_planes):
super(SANet, self).__init__()
self.f = nn.Conv2d(in_planes, in_planes, (1, 1))
self.g = nn.Conv2d(in_planes, in_planes, (1, 1))
self.h = nn.Conv2d(in_planes, in_planes, (1, 1))
self.sm = nn.Softmax(dim=-1)
self.out_conv = nn.Conv2d(in_planes, in_planes, (1, 1))
def forward(self, content, style):
F = self.f(mean_variance_norm(content))
G = self.g(mean_variance_norm(style))
H = self.h(style)
b, c, h, w = F.size()
F = F.view(b, -1, w * h).permute(0, 2, 1)
b, c, h, w = G.size()
G = G.view(b, -1, w * h)
S = torch.bmm(F, G)
S = self.sm(S)
b, c, h, w = H.size()
H = H.view(b, -1, w * h)
O = torch.bmm(H, S.permute(0, 2, 1))
b, c, h, w = content.size()
O = O.view(b, c, h, w)
O = self.out_conv(O)
O += content
return O
class TransformNew(nn.Module):
def __init__(self, in_planes):
super(TransformNew, self).__init__()
self.sanet4_1 = SANet(in_planes=in_planes)
self.sanet5_1 = SANet(in_planes=in_planes)
self.merge_conv_pad = nn.ReflectionPad2d((1, 1, 1, 1))
self.merge_conv = nn.Conv2d(in_planes, in_planes, (3, 3))
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.sanet4_1.f.weight
primals_3 = self.sanet4_1.f.bias
primals_5 = self.sanet4_1.g.weight
primals_6 = self.sanet4_1.g.bias
primals_7 = self.sanet4_1.h.weight
primals_8 = self.sanet4_1.h.bias
primals_9 = self.sanet4_1.out_conv.weight
primals_10 = self.sanet4_1.out_conv.bias
primals_12 = self.sanet5_1.f.weight
primals_13 = self.sanet5_1.f.bias
primals_15 = self.sanet5_1.g.weight
primals_16 = self.sanet5_1.g.bias
primals_17 = self.sanet5_1.h.weight
primals_18 = self.sanet5_1.h.bias
primals_19 = self.sanet5_1.out_conv.weight
primals_20 = self.sanet5_1.out_conv.bias
primals_21 = self.merge_conv.weight
primals_22 = self.merge_conv.bias
primals_1 = input_0
primals_4 = input_1
primals_11 = input_2
primals_14 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22])
return output[0]
|
HalbertCH/IEContraAST
|
Transform
| false
| 8,289
|
[
"MIT"
] | 39
|
50ee949f5302a7e4a3cae3226610c03462093c21
|
https://github.com/HalbertCH/IEContraAST/tree/50ee949f5302a7e4a3cae3226610c03462093c21
|
EuclideanLoss
|
import torch
import torch.nn as nn
class EuclideanLoss(nn.Module):
def __init__(self):
super(EuclideanLoss, self).__init__()
def forward(self, pre, gt):
N = pre.shape[0]
diff = torch.sum((pre - gt).pow(2)) / (N * 2)
return diff
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 0.125
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class EuclideanLossNew(nn.Module):
def __init__(self):
super(EuclideanLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
IndigoPurple/EFENet
|
EuclideanLoss
| false
| 8,290
|
[
"MIT"
] | 11
|
e88234486f19534274a0a20badc251788ac67e31
|
https://github.com/IndigoPurple/EFENet/tree/e88234486f19534274a0a20badc251788ac67e31
|
loss_Textures
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn
class loss_Textures(nn.Module):
def __init__(self, nc=1, alpha=1.2, margin=0):
super(loss_Textures, self).__init__()
self.nc = nc
self.alpha = alpha
self.margin = margin
def forward(self, x, y):
xi = x.contiguous().view(x.size(0), -1, self.nc, x.size(2), x.size(3))
yi = y.contiguous().view(y.size(0), -1, self.nc, y.size(2), y.size(3))
xi2 = torch.sum(xi * xi, dim=2)
yi2 = torch.sum(yi * yi, dim=2)
out = nn.functional.relu(yi2.mul(self.alpha) - xi2 + self.margin)
return torch.mean(out)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_relu_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = 1.2
tmp3 = tmp1 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 - tmp5
tmp7 = 0.0
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_relu_sub_sum_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class loss_TexturesNew(nn.Module):
def __init__(self, nc=1, alpha=1.2, margin=0):
super(loss_TexturesNew, self).__init__()
self.nc = nc
self.alpha = alpha
self.margin = margin
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
IceClear/MW-GAN
|
loss_Textures
| false
| 8,291
|
[
"MIT"
] | 36
|
acb962468c984681c4a21f7b5c14588ca8f58c00
|
https://github.com/IceClear/MW-GAN/tree/acb962468c984681c4a21f7b5c14588ca8f58c00
|
BilinearMatrixAttention
|
import torch
import torch.nn as nn
class BilinearMatrixAttention(nn.Module):
"""
Adopted from AllenNLP. For now there is no activation function
"""
def __init__(self, matrix_1_dim: 'int', matrix_2_dim: 'int',
use_input_biases: 'bool'=False, label_dim: 'int'=1) ->None:
super().__init__()
if use_input_biases:
matrix_1_dim += 1
matrix_2_dim += 1
if label_dim == 1:
self.weight_matrix = nn.Parameter(torch.Tensor(matrix_1_dim,
matrix_2_dim))
else:
self.weight_matrix = nn.Parameter(torch.Tensor(label_dim,
matrix_1_dim, matrix_2_dim))
self.bias = nn.Parameter(torch.Tensor(1))
self.use_input_biases = use_input_biases
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight_matrix)
self.bias.data.fill_(0)
def forward(self, matrix_1: 'torch.Tensor', matrix_2: 'torch.Tensor'
) ->torch.Tensor:
if self.use_input_biases:
bias1 = matrix_1.new_ones(matrix_1.size()[:-1] + (1,))
bias2 = matrix_2.new_ones(matrix_2.size()[:-1] + (1,))
matrix_1 = torch.cat([matrix_1, bias1], -1)
matrix_2 = torch.cat([matrix_2, bias2], -1)
weight = self.weight_matrix
if weight.dim() == 2:
weight = weight.unsqueeze(0)
intermediate = torch.matmul(matrix_1.unsqueeze(1), weight)
final = torch.matmul(intermediate, matrix_2.unsqueeze(1).transpose(
2, 3))
return final.squeeze(1) + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'matrix_1_dim': 4, 'matrix_2_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (0, 4, 1), 0),
out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 1, 16, 4, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_3, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (16, 4, 4), (16,
4, 1), 0), out=buf2)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
return buf3, reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (16, 4, 4), (16, 1, 4), 0)
class BilinearMatrixAttentionNew(nn.Module):
"""
Adopted from AllenNLP. For now there is no activation function
"""
def __init__(self, matrix_1_dim: 'int', matrix_2_dim: 'int',
use_input_biases: 'bool'=False, label_dim: 'int'=1) ->None:
super().__init__()
if use_input_biases:
matrix_1_dim += 1
matrix_2_dim += 1
if label_dim == 1:
self.weight_matrix = nn.Parameter(torch.Tensor(matrix_1_dim,
matrix_2_dim))
else:
self.weight_matrix = nn.Parameter(torch.Tensor(label_dim,
matrix_1_dim, matrix_2_dim))
self.bias = nn.Parameter(torch.Tensor(1))
self.use_input_biases = use_input_biases
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight_matrix)
self.bias.data.fill_(0)
def forward(self, input_0, input_1):
primals_1 = self.weight_matrix
primals_4 = self.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Impavidity/relogic
|
BilinearMatrixAttention
| false
| 8,292
|
[
"MIT"
] | 24
|
f647106e143cd603b95b63e06ea530cdd516aefe
|
https://github.com/Impavidity/relogic/tree/f647106e143cd603b95b63e06ea530cdd516aefe
|
CharbonnierLoss
|
import torch
import torch.nn as nn
class CharbonnierLoss(nn.Module):
def __init__(self):
super(CharbonnierLoss, self).__init__()
def forward(self, pre, gt):
N = pre.shape[0]
diff = torch.sum(torch.sqrt((pre - gt).pow(2) + 0.001 ** 2)) / N
return diff
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_pow_sqrt_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_pow_sqrt_sub_sum_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class CharbonnierLossNew(nn.Module):
def __init__(self):
super(CharbonnierLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
IndigoPurple/EFENet
|
CharbonnierLoss
| false
| 8,293
|
[
"MIT"
] | 11
|
e88234486f19534274a0a20badc251788ac67e31
|
https://github.com/IndigoPurple/EFENet/tree/e88234486f19534274a0a20badc251788ac67e31
|
ConvLayer
|
import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
'int', stride: 'int'):
super().__init__()
self._conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self._pad = nn.ReflectionPad2d(padding=kernel_size // 2)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self._pad(x)
x = self._conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class ConvLayerNew(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
'int', stride: 'int'):
super().__init__()
self._conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self._pad = nn.ReflectionPad2d(padding=kernel_size // 2)
def forward(self, input_0):
primals_1 = self._conv.weight
primals_3 = self._conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Inkln/StyleTransferWithCatalyst
|
ConvLayer
| false
| 8,294
|
[
"Apache-2.0"
] | 11
|
c3181ecdfd32160907efc2d9d917a55925c25c11
|
https://github.com/Inkln/StyleTransferWithCatalyst/tree/c3181ecdfd32160907efc2d9d917a55925c25c11
|
HardMish
|
import torch
from torch import nn as nn
def hard_mish(x, inplace: 'bool'=False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMish(nn.Module):
def __init__(self, inplace: 'bool'=False):
super(HardMish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_mish(x, self.inplace)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 2.0
tmp4 = tmp0 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = triton_helpers.minimum(tmp6, tmp3)
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def hard_mish(x, inplace: 'bool'=False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMishNew(nn.Module):
def __init__(self, inplace: 'bool'=False):
super(HardMishNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JDAI-CV/CoTNet-ObjectDetection-InstanceSegmentation
|
HardMish
| false
| 8,295
|
[
"Apache-2.0"
] | 34
|
2a546ef946989fc5bac8d819b3c93a9fdc83f241
|
https://github.com/JDAI-CV/CoTNet-ObjectDetection-InstanceSegmentation/tree/2a546ef946989fc5bac8d819b3c93a9fdc83f241
|
SpatialPyramidPooling
|
import torch
import torch.nn as nn
class SpatialPyramidPooling(nn.Module):
def __init__(self, pool_sizes=[5, 9, 13]):
super(SpatialPyramidPooling, self).__init__()
self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size //
2) for pool_size in pool_sizes])
def forward(self, x):
features = [maxpool(x) for maxpool in self.maxpools[::-1]]
features = torch.cat(features + [x], dim=1)
return features
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_max_pool2d_with_indices_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x7 = xindex
x3 = xindex // 64
x4 = xindex % 64
tmp116 = tl.load(in_ptr0 + x7, xmask)
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -2 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-10 + x7), tmp10 & xmask, other=float('-inf'))
tmp12 = -1 + x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-9 + x7), tmp16 & xmask, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-8 + x7), tmp23 & xmask, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 1 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + (-7 + x7), tmp30 & xmask, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = 2 + x0
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp5 & tmp36
tmp38 = tl.load(in_ptr0 + (-6 + x7), tmp37 & xmask, other=float('-inf'))
tmp39 = triton_helpers.maximum(tmp38, tmp32)
tmp40 = -1 + x1
tmp41 = tmp40 >= tmp1
tmp42 = tmp40 < tmp3
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp9
tmp45 = tl.load(in_ptr0 + (-6 + x7), tmp44 & xmask, other=float('-inf'))
tmp46 = triton_helpers.maximum(tmp45, tmp39)
tmp47 = tmp43 & tmp15
tmp48 = tl.load(in_ptr0 + (-5 + x7), tmp47 & xmask, other=float('-inf'))
tmp49 = triton_helpers.maximum(tmp48, tmp46)
tmp50 = tmp43 & tmp22
tmp51 = tl.load(in_ptr0 + (-4 + x7), tmp50 & xmask, other=float('-inf'))
tmp52 = triton_helpers.maximum(tmp51, tmp49)
tmp53 = tmp43 & tmp29
tmp54 = tl.load(in_ptr0 + (-3 + x7), tmp53 & xmask, other=float('-inf'))
tmp55 = triton_helpers.maximum(tmp54, tmp52)
tmp56 = tmp43 & tmp36
tmp57 = tl.load(in_ptr0 + (-2 + x7), tmp56 & xmask, other=float('-inf'))
tmp58 = triton_helpers.maximum(tmp57, tmp55)
tmp59 = x1
tmp60 = tmp59 >= tmp1
tmp61 = tmp59 < tmp3
tmp62 = tmp60 & tmp61
tmp63 = tmp62 & tmp9
tmp64 = tl.load(in_ptr0 + (-2 + x7), tmp63 & xmask, other=float('-inf'))
tmp65 = triton_helpers.maximum(tmp64, tmp58)
tmp66 = tmp62 & tmp15
tmp67 = tl.load(in_ptr0 + (-1 + x7), tmp66 & xmask, other=float('-inf'))
tmp68 = triton_helpers.maximum(tmp67, tmp65)
tmp69 = tmp62 & tmp22
tmp70 = tl.load(in_ptr0 + x7, tmp69 & xmask, other=float('-inf'))
tmp71 = triton_helpers.maximum(tmp70, tmp68)
tmp72 = tmp62 & tmp29
tmp73 = tl.load(in_ptr0 + (1 + x7), tmp72 & xmask, other=float('-inf'))
tmp74 = triton_helpers.maximum(tmp73, tmp71)
tmp75 = tmp62 & tmp36
tmp76 = tl.load(in_ptr0 + (2 + x7), tmp75 & xmask, other=float('-inf'))
tmp77 = triton_helpers.maximum(tmp76, tmp74)
tmp78 = 1 + x1
tmp79 = tmp78 >= tmp1
tmp80 = tmp78 < tmp3
tmp81 = tmp79 & tmp80
tmp82 = tmp81 & tmp9
tmp83 = tl.load(in_ptr0 + (2 + x7), tmp82 & xmask, other=float('-inf'))
tmp84 = triton_helpers.maximum(tmp83, tmp77)
tmp85 = tmp81 & tmp15
tmp86 = tl.load(in_ptr0 + (3 + x7), tmp85 & xmask, other=float('-inf'))
tmp87 = triton_helpers.maximum(tmp86, tmp84)
tmp88 = tmp81 & tmp22
tmp89 = tl.load(in_ptr0 + (4 + x7), tmp88 & xmask, other=float('-inf'))
tmp90 = triton_helpers.maximum(tmp89, tmp87)
tmp91 = tmp81 & tmp29
tmp92 = tl.load(in_ptr0 + (5 + x7), tmp91 & xmask, other=float('-inf'))
tmp93 = triton_helpers.maximum(tmp92, tmp90)
tmp94 = tmp81 & tmp36
tmp95 = tl.load(in_ptr0 + (6 + x7), tmp94 & xmask, other=float('-inf'))
tmp96 = triton_helpers.maximum(tmp95, tmp93)
tmp97 = 2 + x1
tmp98 = tmp97 >= tmp1
tmp99 = tmp97 < tmp3
tmp100 = tmp98 & tmp99
tmp101 = tmp100 & tmp9
tmp102 = tl.load(in_ptr0 + (6 + x7), tmp101 & xmask, other=float('-inf'))
tmp103 = triton_helpers.maximum(tmp102, tmp96)
tmp104 = tmp100 & tmp15
tmp105 = tl.load(in_ptr0 + (7 + x7), tmp104 & xmask, other=float('-inf'))
tmp106 = triton_helpers.maximum(tmp105, tmp103)
tmp107 = tmp100 & tmp22
tmp108 = tl.load(in_ptr0 + (8 + x7), tmp107 & xmask, other=float('-inf'))
tmp109 = triton_helpers.maximum(tmp108, tmp106)
tmp110 = tmp100 & tmp29
tmp111 = tl.load(in_ptr0 + (9 + x7), tmp110 & xmask, other=float('-inf'))
tmp112 = triton_helpers.maximum(tmp111, tmp109)
tmp113 = tmp100 & tmp36
tmp114 = tl.load(in_ptr0 + (10 + x7), tmp113 & xmask, other=float('-inf'))
tmp115 = triton_helpers.maximum(tmp114, tmp112)
tl.store(out_ptr0 + (x4 + 256 * x3), tmp115, xmask)
tl.store(out_ptr1 + (x4 + 256 * x3), tmp116, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 256 * x1), tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [13,
13], [1, 1], [6, 6])
buf1 = buf0[0]
del buf0
buf3 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [9, 9
], [1, 1], [4, 4])
buf4 = buf3[0]
del buf3
buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
buf6 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 128)
buf9 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 192)
get_raw_stream(0)
triton_poi_fused_cat_max_pool2d_with_indices_0[grid(256)](arg0_1,
buf6, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf7 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(256)](buf1, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf8 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 64)
triton_poi_fused_cat_1[grid(256)](buf4, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
return buf10,
class SpatialPyramidPoolingNew(nn.Module):
def __init__(self, pool_sizes=[5, 9, 13]):
super(SpatialPyramidPoolingNew, self).__init__()
self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size //
2) for pool_size in pool_sizes])
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
IDayday/YOLOv4_CAM
|
SpatialPyramidPooling
| false
| 8,296
|
[
"Apache-2.0"
] | 34
|
8df61f1c59c197126f0385c1ec1cf65a29a80cec
|
https://github.com/IDayday/YOLOv4_CAM/tree/8df61f1c59c197126f0385c1ec1cf65a29a80cec
|
decoder4
|
import torch
from torch import nn
class decoder4(nn.Module):
def __init__(self):
super(decoder4, self).__init__()
self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(512, 256, 3, 1, 0)
self.relu11 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad12 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv12 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu12 = nn.ReLU(inplace=True)
self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv13 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu13 = nn.ReLU(inplace=True)
self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv14 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu14 = nn.ReLU(inplace=True)
self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv15 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu15 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv16 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu16 = nn.ReLU(inplace=True)
self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv17 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu17 = nn.ReLU(inplace=True)
self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv18 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu18 = nn.ReLU(inplace=True)
self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv19 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, x):
out = self.reflecPad11(x)
out = self.conv11(out)
out = self.relu11(out)
out = self.unpool(out)
out = self.reflecPad12(out)
out = self.conv12(out)
out = self.relu12(out)
out = self.reflecPad13(out)
out = self.conv13(out)
out = self.relu13(out)
out = self.reflecPad14(out)
out = self.conv14(out)
out = self.relu14(out)
out = self.reflecPad15(out)
out = self.conv15(out)
out = self.relu15(out)
out = self.unpool2(out)
out = self.reflecPad16(out)
out = self.conv16(out)
out = self.relu16(out)
out = self.reflecPad17(out)
out = self.conv17(out)
out = self.relu17(out)
out = self.unpool3(out)
out = self.reflecPad18(out)
out = self.conv18(out)
out = self.relu18(out)
out = self.reflecPad19(out)
out = self.conv19(out)
return out
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1
))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0
))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 34 % 34
x0 = xindex % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
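# Note (added comment): the *_threshold_backward kernels above do not contribute to the
# forward output; they recompute conv + bias + ReLU and store the boolean mask
# (activation <= 0) that call() hands back (buf22..buf29) for reuse in the ReLU
# backward pass.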
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0,
73728, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid
(102400)](buf2, buf1, primals_3, buf3, 102400, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1))
buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf4
, primals_5, buf5, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf6
, primals_7, buf7, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1))
buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf8
, primals_9, buf9, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1))
buf11 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid
(165888)](buf11, buf10, primals_11, buf12, 165888, XBLOCK=512,
num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(165888)](
buf13, primals_13, buf14, 165888, XBLOCK=1024, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1))
buf16 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf16, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid
(295936)](buf16, buf15, primals_15, buf17, 295936, XBLOCK=512,
num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(295936)](
buf18, primals_17, buf19, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_10[grid(12288)](buf21, primals_19,
12288, XBLOCK=128, num_warps=4, num_stages=1)
del primals_19
buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(262144)](
buf18, primals_17, buf22, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_17
buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(65536)](
buf15, primals_15, buf23, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf15
del primals_15
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_13[grid(131072)](
buf13, primals_13, buf24, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_14[grid(32768)](
buf10, primals_11, buf25, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf10
del primals_11
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf8, primals_9, buf26, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf8
del primals_9
buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf6, primals_7, buf27, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf4, primals_5, buf28, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(16384)](
buf1, primals_3, buf29, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_3
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3,
buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22,
buf23, buf24, buf25, buf26, buf27, buf28, buf29)
class decoder4New(nn.Module):
def __init__(self):
super(decoder4New, self).__init__()
self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(512, 256, 3, 1, 0)
self.relu11 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad12 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv12 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu12 = nn.ReLU(inplace=True)
self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv13 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu13 = nn.ReLU(inplace=True)
self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv14 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu14 = nn.ReLU(inplace=True)
self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv15 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu15 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv16 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu16 = nn.ReLU(inplace=True)
self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv17 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu17 = nn.ReLU(inplace=True)
self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv18 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu18 = nn.ReLU(inplace=True)
self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv19 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, input_0):
primals_2 = self.conv11.weight
primals_3 = self.conv11.bias
primals_4 = self.conv12.weight
primals_5 = self.conv12.bias
primals_6 = self.conv13.weight
primals_7 = self.conv13.bias
primals_8 = self.conv14.weight
primals_9 = self.conv14.bias
primals_10 = self.conv15.weight
primals_11 = self.conv15.bias
primals_12 = self.conv16.weight
primals_13 = self.conv16.bias
primals_14 = self.conv17.weight
primals_15 = self.conv17.bias
primals_16 = self.conv18.weight
primals_17 = self.conv18.bias
primals_18 = self.conv19.weight
primals_19 = self.conv19.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
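# Hedged usage sketch (added for illustration; not part of the generated file).
# decoder4New keeps decoder4's nn.Module interface but routes forward() through the
# compiled call(), which pins every buffer to CUDA device 0, so a GPU is assumed here.
if __name__ == "__main__" and torch.cuda.is_available():
    model = decoder4New().cuda()
    feats = torch.rand(4, 512, 4, 4, device="cuda")
    img = model(feats)
    # Three fused nearest-neighbour 2x upsamples take 4x4 features to a 32x32 image.
    assert img.shape == (4, 3, 32, 32)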
|
Holmes-Alan/RefVAE
|
decoder4
| false
| 8,297
|
[
"MIT"
] | 13
|
836b8f1168f1b0f923b609a48e202ace7806f79c
|
https://github.com/Holmes-Alan/RefVAE/tree/836b8f1168f1b0f923b609a48e202ace7806f79c
|
Normalization
|
import torch
import torch.nn as nn
class Normalization(nn.Module):
def __init__(self):
super(Normalization, self).__init__()
self.mean = nn.Parameter(torch.tensor([0.485, 0.456, 0.406]).view(-
1, 1, 1))
self.std = nn.Parameter(torch.tensor([0.329, 0.224, 0.225]).view(-1,
1, 1))
def forward(self, img):
return (img - self.mean) / self.std
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {}]
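# Hedged example (added for illustration; not from the original repo): the module just
# applies the per-channel map (img[:, c] - mean[c]) / std[c], broadcast over H and W.
if __name__ == "__main__":
    norm = Normalization()
    img = torch.rand(4, 3, 4, 4)
    out = norm(img)
    assert out.shape == img.shape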
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 3
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (3, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (3, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sub_0[grid(192)](primals_2, primals_1,
primals_3, buf0, 192, XBLOCK=128, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2, primals_3
class NormalizationNew(nn.Module):
def __init__(self):
super(NormalizationNew, self).__init__()
self.mean = nn.Parameter(torch.tensor([0.485, 0.456, 0.406]).view(-
1, 1, 1))
self.std = nn.Parameter(torch.tensor([0.329, 0.224, 0.225]).view(-1,
1, 1))
def forward(self, input_0):
primals_1 = self.mean
primals_3 = self.std
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Inkln/StyleTransferWithCatalyst
|
Normalization
| false
| 8,298
|
[
"Apache-2.0"
] | 11
|
c3181ecdfd32160907efc2d9d917a55925c25c11
|
https://github.com/Inkln/StyleTransferWithCatalyst/tree/c3181ecdfd32160907efc2d9d917a55925c25c11
|
GroupNorm32
|
import torch
import torch.nn.functional as F
from torch import nn
class GroupNorm32(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-05):
super().__init__(num_groups=num_groups, num_channels=num_channels,
eps=eps)
self.swish = swish
def forward(self, x):
y = super().forward(x.float())
if self.swish == 1.0:
y = F.silu(y)
elif self.swish:
y = y * F.sigmoid(y * float(self.swish))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_groups': 1, 'num_channels': 4, 'swish': 4}]
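# Hedged example (added for illustration; not from the original repo): after the group
# norm, swish == 1.0 applies F.silu(y), while any other truthy swish gates the output
# with y * sigmoid(swish * y).
if __name__ == "__main__":
    gn = GroupNorm32(num_groups=1, num_channels=4, swish=4)
    x = torch.rand(4, 4, 4, 4)
    y = gn(x)
    assert y.shape == x.shape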
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 4.0
tmp29 = tmp27 * tmp28
tmp30 = tl.sigmoid(tmp29)
tmp31 = tmp27 * tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4
del buf4
get_raw_stream(0)
triton_per_fused_mul_native_group_norm_sigmoid_0[grid(4)](buf3,
buf5, primals_1, primals_2, primals_3, buf0, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
return buf5, primals_1, primals_2, primals_3, buf0, buf3
class GroupNorm32New(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-05):
super().__init__(num_groups=num_groups, num_channels=num_channels,
eps=eps)
self.swish = swish
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Jack000/glid-3
|
GroupNorm32
| false
| 8,299
|
[
"MIT"
] | 31
|
4a18efc2785339ebc743e149a7955e34fff436fb
|
https://github.com/Jack000/glid-3/tree/4a18efc2785339ebc743e149a7955e34fff436fb
|
ImageGradients
|
import torch
import torch as th
import torch.utils.data
class ImageGradients(th.nn.Module):
"""
Args:
c_in(int): number of channels expected in the images.
use_sobel(bool): if True, uses a (smoother) Sobel filter instead of simple
finite differences.
"""
def __init__(self, c_in, use_sobel=True):
super(ImageGradients, self).__init__()
if use_sobel:
self.dx = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=
False, groups=c_in)
self.dy = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=
False, groups=c_in)
self.dx.weight.data.zero_()
self.dx.weight.data[:, :, 0, 0] = -1
self.dx.weight.data[:, :, 0, 2] = 1
self.dx.weight.data[:, :, 1, 0] = -2
self.dx.weight.data[:, :, 1, 2] = 2
self.dx.weight.data[:, :, 2, 0] = -1
self.dx.weight.data[:, :, 2, 2] = 1
self.dy.weight.data.zero_()
self.dy.weight.data[:, :, 0, 0] = -1
self.dy.weight.data[:, :, 2, 0] = 1
self.dy.weight.data[:, :, 0, 1] = -2
self.dy.weight.data[:, :, 2, 1] = 2
self.dy.weight.data[:, :, 0, 2] = -1
self.dy.weight.data[:, :, 2, 2] = 1
else:
self.dx = th.nn.Conv2d(c_in, c_in, [1, 3], padding=(0, 1), bias
=False, groups=c_in)
self.dy = th.nn.Conv2d(c_in, c_in, [3, 1], padding=(1, 0), bias
=False, groups=c_in)
self.dx.weight.data.zero_()
self.dx.weight.data[:, :, 0, 0] = -1
self.dx.weight.data[:, :, 0, 1] = 1
self.dy.weight.data.zero_()
self.dy.weight.data[:, :, 0, 0] = -1
self.dy.weight.data[:, :, 1, 0] = 1
self.dx.weight.requires_grad = False
self.dy.weight.requires_grad = False
def forward(self, im):
return th.cat([self.dx(im), self.dy(im)], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4}]
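# Hedged example (added for illustration; not from the original repo): with the default
# use_sobel=True, dx and dy hold the 3x3 Sobel kernels, and the output stacks the
# horizontal and vertical gradients along the channel axis (2 * c_in channels).
if __name__ == "__main__":
    grad = ImageGradients(c_in=4)
    im = torch.rand(4, 4, 8, 8)
    g = grad(im)
    assert g.shape == (4, 8, 8, 8)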
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch as th
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
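# Note (added comment): this copy kernel only re-lays out the NCHW input into the
# channels-last strides (64, 1, 16, 4) expected by the two grouped convolutions below,
# writing the same values into both buf0 (dx branch) and buf2 (dy branch).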
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 64 * x2 + x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x0 + 64 * x2 + (-4 + x1)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, buf2, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 1, 16, 4))
del arg0_1
del buf0
buf3 = extern_kernels.convolution(buf2, arg2_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 1, 16, 4))
del arg2_1
del buf2
buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](buf1, buf3, buf4, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
del buf3
return buf4,
class ImageGradientsNew(th.nn.Module):
"""
Args:
c_in(int): number of channels expected in the images.
use_sobel(bool): if True, uses a (smoother) Sobel filter instead of simple
finite differences.
"""
def __init__(self, c_in, use_sobel=True):
super(ImageGradientsNew, self).__init__()
if use_sobel:
self.dx = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=
False, groups=c_in)
self.dy = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=
False, groups=c_in)
self.dx.weight.data.zero_()
self.dx.weight.data[:, :, 0, 0] = -1
self.dx.weight.data[:, :, 0, 2] = 1
self.dx.weight.data[:, :, 1, 0] = -2
self.dx.weight.data[:, :, 1, 2] = 2
self.dx.weight.data[:, :, 2, 0] = -1
self.dx.weight.data[:, :, 2, 2] = 1
self.dy.weight.data.zero_()
self.dy.weight.data[:, :, 0, 0] = -1
self.dy.weight.data[:, :, 2, 0] = 1
self.dy.weight.data[:, :, 0, 1] = -2
self.dy.weight.data[:, :, 2, 1] = 2
self.dy.weight.data[:, :, 0, 2] = -1
self.dy.weight.data[:, :, 2, 2] = 1
else:
self.dx = th.nn.Conv2d(c_in, c_in, [1, 3], padding=(0, 1), bias
=False, groups=c_in)
self.dy = th.nn.Conv2d(c_in, c_in, [3, 1], padding=(1, 0), bias
=False, groups=c_in)
self.dx.weight.data.zero_()
self.dx.weight.data[:, :, 0, 0] = -1
self.dx.weight.data[:, :, 0, 1] = 1
self.dy.weight.data.zero_()
self.dy.weight.data[:, :, 0, 0] = -1
self.dy.weight.data[:, :, 1, 0] = 1
self.dx.weight.requires_grad = False
self.dy.weight.requires_grad = False
def forward(self, input_0):
arg0_1 = self.dx.weight
arg2_1 = self.dy.weight
arg1_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
IlyaBizyaev/ttools
|
ImageGradients
| false
| 8,300
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
decoder3
|
import torch
from torch import nn
class decoder3(nn.Module):
def __init__(self):
super(decoder3, self).__init__()
self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu10 = nn.ReLU(inplace=True)
self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, x):
out = self.reflecPad7(x)
out = self.conv7(out)
out = self.relu7(out)
out = self.unpool(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out_relu9 = self.relu9(out)
out = self.unpool2(out_relu9)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
out = self.reflecPad11(out)
out = self.conv11(out)
return out
def get_inputs():
return [torch.rand([4, 256, 4, 4])]
def get_init_inputs():
return [[], {}]
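# Hedged example (added for illustration; not from the original repo): decoder3 maps
# 256-channel features back to an RGB image, doubling the spatial size at each of the
# two UpsamplingNearest2d stages.
if __name__ == "__main__":
    dec = decoder3()
    feat = torch.rand(4, 256, 4, 4)
    img = dec(feat)
    assert img.shape == (4, 3, 16, 16)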
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1
))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0
))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 256 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_2, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(36864)](primals_1, buf0,
36864, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 128, 4, 4), (2048, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid
(51200)](buf2, buf1, primals_3, buf3, 51200, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 8, 8), (8192, 64, 8, 1))
buf5 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(51200)](buf4,
primals_5, buf5, 51200, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 8, 8), (4096, 64, 8, 1))
buf7 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid
(82944)](buf7, buf6, primals_7, buf8, 82944, XBLOCK=512,
num_warps=8, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 16, 16), (16384, 256, 16, 1))
buf10 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(82944)](buf9,
primals_9, buf10, 82944, XBLOCK=512, num_warps=8, num_stages=1)
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 3, 16, 16), (768, 256, 16, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_7[grid(3072)](buf12, primals_11, 3072,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(65536)](
buf9, primals_9, buf13, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del buf9
del primals_9
buf14 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(16384)](
buf6, primals_7, buf14, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(32768)](
buf4, primals_5, buf15, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf16 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(8192)](
buf1, primals_3, buf16, 8192, XBLOCK=128, num_warps=4, num_stages=1
)
del buf1
del primals_3
return (buf12, primals_2, primals_4, primals_6, primals_8, primals_10,
buf0, buf2, buf3, buf5, buf7, buf8, buf10, buf13, buf14, buf15, buf16)
class decoder3New(nn.Module):
def __init__(self):
super(decoder3New, self).__init__()
self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu10 = nn.ReLU(inplace=True)
self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, input_0):
primals_2 = self.conv7.weight
primals_3 = self.conv7.bias
primals_4 = self.conv8.weight
primals_5 = self.conv8.bias
primals_6 = self.conv9.weight
primals_7 = self.conv9.bias
primals_8 = self.conv10.weight
primals_9 = self.conv10.bias
primals_10 = self.conv11.weight
primals_11 = self.conv11.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Holmes-Alan/RefVAE
|
decoder3
| false
| 8,301
|
[
"MIT"
] | 13
|
836b8f1168f1b0f923b609a48e202ace7806f79c
|
https://github.com/Holmes-Alan/RefVAE/tree/836b8f1168f1b0f923b609a48e202ace7806f79c
|
CMMD
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CMMD(nn.Module):
def __init__(self, num_pos):
super(CMMD, self).__init__()
self.num_pos = num_pos
def forward(self, feat_v, feat_t):
feat_v = feat_v.view(feat_v.size(0), -1)
feat_v = F.normalize(feat_v, dim=-1)
feat_v_s = torch.split(feat_v, self.num_pos)
feat_t = feat_t.view(feat_t.size(0), -1)
feat_t = F.normalize(feat_t, dim=-1)
feat_t_s = torch.split(feat_t, self.num_pos)
losses = [self.mmd_loss(f_v, f_t) for f_v, f_t in zip(feat_v_s,
feat_t_s)]
loss = sum(losses) / len(losses)
return loss
def mmd_loss(self, f_v, f_t):
return self.poly_kernel(f_v, f_v).mean() + self.poly_kernel(f_t, f_t
).mean() - 2 * self.poly_kernel(f_v, f_t).mean()
def poly_kernel(self, a, b):
a = a.unsqueeze(0)
b = b.unsqueeze(1)
res = (a * b).sum(-1).pow(2)
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_pos': 4}]
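# Hedged example (added for illustration; not from the original repo): for each split of
# num_pos rows the loss is mean(k(v, v)) + mean(k(t, t)) - 2 * mean(k(v, t)), where
# k(a, b) = (a . b)^2 is evaluated pairwise on the L2-normalised feature rows.
if __name__ == "__main__":
    cmmd = CMMD(num_pos=4)
    feat_v = torch.rand(4, 4, 4, 4)
    feat_t = torch.rand(4, 4, 4, 4)
    loss = cmmd(feat_v, feat_t)
    assert loss.dim() == 0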
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (r2 + 64 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = libdevice.sqrt(tmp1)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tmp8 = libdevice.sqrt(tmp7)
tmp9 = triton_helpers.maximum(tmp8, tmp3)
tmp10 = tmp6 / tmp9
tmp11 = tmp5 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp18 = libdevice.sqrt(tmp17)
tmp19 = triton_helpers.maximum(tmp18, tmp3)
tmp20 = tmp16 / tmp19
tmp23 = libdevice.sqrt(tmp22)
tmp24 = triton_helpers.maximum(tmp23, tmp3)
tmp25 = tmp21 / tmp24
tmp26 = tmp20 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tmp31 = tmp5 * tmp25
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.where(xmask, tmp32, 0)
tmp35 = tl.sum(tmp34, 1)[:, None]
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp30, xmask)
tl.store(out_ptr2 + x3, tmp35, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sub_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 16.0
tmp16 = tmp4 / tmp15
tmp17 = tmp9 / tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp14 / tmp15
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = tmp18 - tmp21
tmp23 = 0.0
tmp24 = tmp22 + tmp23
tmp25 = 1.0
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(4)](arg0_1, buf0, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_per_fused_linalg_vector_norm_0[grid(4)](arg1_1, buf1, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_mul_sum_1[grid(16)](arg0_1, buf0, arg1_1, buf1,
buf2, buf4, buf6, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf8 = buf3
del buf3
triton_per_fused_add_div_mean_mul_pow_sub_2[grid(1)](buf8, buf2,
buf4, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf4
del buf6
return buf8,
class CMMDNew(nn.Module):
def __init__(self, num_pos):
super(CMMDNew, self).__init__()
self.num_pos = num_pos
def mmd_loss(self, f_v, f_t):
return self.poly_kernel(f_v, f_v).mean() + self.poly_kernel(f_t, f_t
).mean() - 2 * self.poly_kernel(f_v, f_t).mean()
def poly_kernel(self, a, b):
a = a.unsqueeze(0)
b = b.unsqueeze(1)
res = (a * b).sum(-1).pow(2)
return res
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
JDAI-CV/CM-NAS
|
CMMD
| false
| 8,302
|
[
"Apache-2.0"
] | 31
|
bbc77f427b2c8afb9f3865f5a04e86079d33dd28
|
https://github.com/JDAI-CV/CM-NAS/tree/bbc77f427b2c8afb9f3865f5a04e86079d33dd28
|
qy
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=0, mode='fan_in', nonlinearity
='leaky_relu')
try:
nn.init.constant_(m.bias, 0.01)
except:
pass
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, a=0, mode='fan_in', nonlinearity
='leaky_relu')
nn.init.constant_(m.bias, 0.01)
class Generator(nn.Module):
def __init__(self, input_dim=8, output_dim=2):
super(Generator, self).__init__()
self.linear1 = nn.Linear(input_dim, 512)
self.linear2 = nn.Linear(512, 512)
self.linear3 = nn.Linear(512, 512)
self.linear4 = nn.Linear(512, 256)
self.linear5 = nn.Linear(256, output_dim)
self.apply(weights_init)
def forward(self, condition, v0, t):
x = torch.cat([condition, v0], dim=1)
x = torch.cat([x, t], dim=1)
x = self.linear1(x)
x = F.leaky_relu(x, inplace=True)
x = self.linear2(x)
x = F.leaky_relu(x, inplace=True)
x = self.linear3(x)
x = F.leaky_relu(x, inplace=True)
x = self.linear4(x)
x = torch.cos(x)
x = self.linear5(x)
return x
class qy(nn.Module):
def __init__(self, zy_dim):
super(qy, self).__init__()
self.trajectory_generation = Generator(input_dim=1 + 1 + zy_dim,
output_dim=4)
def forward(self, zy, v0, t):
h = F.leaky_relu(zy, inplace=True)
condition = h.unsqueeze(1)
condition = condition.expand(h.shape[0], t.shape[-1], h.shape[-1])
condition = condition.reshape(h.shape[0] * t.shape[-1], h.shape[-1])
output = self.trajectory_generation(condition, v0.view(-1, 1), t.
view(-1, 1))
output_xy = output[:, :2]
logvar = output[:, 2:]
return output_xy, logvar
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'zy_dim': 4}]
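# Hedged example (added for illustration; not from the original repo): zy is broadcast
# across the time steps, so the Generator receives one (zy, v0, t) row per
# (sample, time-step) pair and returns a 2-D mean and a 2-D log-variance for each row.
if __name__ == "__main__":
    model = qy(zy_dim=4)
    zy, v0, t = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
    xy, logvar = model(zy, v0, t)
    assert xy.shape == (16, 2) and logvar.shape == (16, 2)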
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + (4 * (x1 // 4) + x0), tmp7 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = 0.0
tmp10 = tmp8 > tmp9
tmp11 = 0.01
tmp12 = tmp8 * tmp11
tmp13 = tl.where(tmp10, tmp8, tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp7, tmp13, tmp14)
tmp16 = tmp0 >= tmp5
tmp17 = tmp16 & tmp4
tmp18 = tl.load(in_ptr1 + x1, tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.where(tmp6, tmp15, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tl.full([1], 6, tl.int64)
tmp25 = tl.load(in_ptr2 + x1, tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tl.where(tmp4, tmp21, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_cos_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl_math.cos(tmp0)
tl.store(out_ptr0 + x0, tmp1, None)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.01
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr1 + x0, tmp5, xmask)
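# Note (added comment): when launched in call() below, primals_1 is passed as both
# in_ptr0 and out_ptr1, so this kernel applies LeakyReLU in place on zy, mirroring
# F.leaky_relu(zy, inplace=True) at the start of qy.forward.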
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (512, 6), (6, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (512, 512), (512, 1))
assert_size_stride(primals_7, (512,), (1,))
assert_size_stride(primals_8, (512, 512), (512, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (256, 512), (512, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4, 256), (256, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 6), (6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(96)](primals_1, primals_3, primals_2,
buf0, 96, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (6, 512), (1,
6), 0), out=buf1)
del primals_4
buf2 = buf1
del buf1
triton_poi_fused_leaky_relu_1[grid(8192)](buf2, primals_5, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_6, (512, 512), (
1, 512), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_leaky_relu_1[grid(8192)](buf4, primals_7, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (512, 512), (
1, 512), 0), out=buf5)
buf6 = buf5
del buf5
triton_poi_fused_leaky_relu_1[grid(8192)](buf6, primals_9, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_11, buf6, reinterpret_tensor(
primals_10, (512, 256), (1, 512), 0), alpha=1, beta=1, out=buf7)
del primals_11
buf8 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
triton_poi_fused_cos_2[grid(4096)](buf7, buf8, 4096, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, buf8, reinterpret_tensor(
primals_12, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf9)
del primals_13
triton_poi_fused_leaky_relu_3[grid(16)](primals_1, primals_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return (reinterpret_tensor(buf9, (16, 2), (4, 1), 0),
reinterpret_tensor(buf9, (16, 2), (4, 1), 2), buf9, buf0, buf2,
buf4, buf6, buf7, buf8, primals_12, primals_10, primals_8, primals_6)
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=0, mode='fan_in', nonlinearity
='leaky_relu')
try:
nn.init.constant_(m.bias, 0.01)
except:
pass
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, a=0, mode='fan_in', nonlinearity
='leaky_relu')
nn.init.constant_(m.bias, 0.01)
class Generator(nn.Module):
def __init__(self, input_dim=8, output_dim=2):
super(Generator, self).__init__()
self.linear1 = nn.Linear(input_dim, 512)
self.linear2 = nn.Linear(512, 512)
self.linear3 = nn.Linear(512, 512)
self.linear4 = nn.Linear(512, 256)
self.linear5 = nn.Linear(256, output_dim)
self.apply(weights_init)
def forward(self, condition, v0, t):
x = torch.cat([condition, v0], dim=1)
x = torch.cat([x, t], dim=1)
x = self.linear1(x)
x = F.leaky_relu(x, inplace=True)
x = self.linear2(x)
x = F.leaky_relu(x, inplace=True)
x = self.linear3(x)
x = F.leaky_relu(x, inplace=True)
x = self.linear4(x)
x = torch.cos(x)
x = self.linear5(x)
return x
class qyNew(nn.Module):
def __init__(self, zy_dim):
super(qyNew, self).__init__()
self.trajectory_generation = Generator(input_dim=1 + 1 + zy_dim,
output_dim=4)
def forward(self, input_0, input_1, input_2):
primals_4 = self.trajectory_generation.linear1.weight
primals_5 = self.trajectory_generation.linear1.bias
primals_6 = self.trajectory_generation.linear2.weight
primals_7 = self.trajectory_generation.linear2.bias
primals_8 = self.trajectory_generation.linear3.weight
primals_9 = self.trajectory_generation.linear3.bias
primals_10 = self.trajectory_generation.linear4.weight
primals_11 = self.trajectory_generation.linear4.bias
primals_12 = self.trajectory_generation.linear5.weight
primals_13 = self.trajectory_generation.linear5.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
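# Hedged usage sketch (assumptions: a CUDA device is available, since call()
# pins device 0 and launches the fused Triton kernels there, and zy_dim=4 so
# that 1 + 1 + zy_dim matches the (512, 6) weight asserted above). The wrapper
# only harvests the Linear weights/biases as primals and hands them, together
# with the three inputs, to call(); the two returned tensors are (16, 2) views
# into the final addmm output buffer.
if __name__ == '__main__' and torch.cuda.is_available():
    _model = qyNew(zy_dim=4).cuda()
    _cond, _v0, _t = (torch.rand(4, 4, device='cuda') for _ in range(3))
    _traj, _extra = _model(_cond, _v0, _t)
    print(_traj.shape, _extra.shape)  # expected: torch.Size([16, 2]) twice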
|
IamWangYunKai/DG-TrajGen
|
qy
| false
| 8,303
|
[
"MIT"
] | 31
|
0a8aab7e1c05111a5afe43d53801c55942e9ff56
|
https://github.com/IamWangYunKai/DG-TrajGen/tree/0a8aab7e1c05111a5afe43d53801c55942e9ff56
|
decoder6
|
import torch
from torch import nn
class decoder6(nn.Module):
def __init__(self):
super(decoder6, self).__init__()
self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(512, 256, 3, 1, 0)
self.relu11 = nn.ReLU(inplace=True)
self.unpool1 = nn.ConvTranspose2d(256, 256, 4, 2, 1)
self.act1 = nn.ReLU()
self.reflecPad12 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv12 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu12 = nn.ReLU(inplace=True)
self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv13 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu13 = nn.ReLU(inplace=True)
self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv14 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu14 = nn.ReLU(inplace=True)
self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv15 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu15 = nn.ReLU(inplace=True)
self.unpool2 = nn.ConvTranspose2d(128, 128, 4, 2, 1)
self.act2 = nn.ReLU()
self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv16 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu16 = nn.ReLU(inplace=True)
self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv17 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu17 = nn.ReLU(inplace=True)
self.unpool3 = nn.ConvTranspose2d(64, 64, 4, 2, 1)
self.act3 = nn.ReLU()
self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv18 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu18 = nn.ReLU(inplace=True)
self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv19 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, x):
out = self.reflecPad11(x)
out = self.conv11(out)
out = self.relu11(out)
out = self.act1(self.unpool1(out))
out = self.reflecPad12(out)
out = self.conv12(out)
out = self.relu12(out)
out = self.reflecPad13(out)
out = self.conv13(out)
out = self.relu13(out)
out = self.reflecPad14(out)
out = self.conv14(out)
out = self.relu14(out)
out = self.reflecPad15(out)
out = self.conv15(out)
out = self.relu15(out)
out = self.act2(self.unpool2(out))
out = self.reflecPad16(out)
out = self.conv16(out)
out = self.relu16(out)
out = self.reflecPad17(out)
out = self.conv17(out)
out = self.relu17(out)
out = self.act3(self.unpool3(out))
out = self.reflecPad18(out)
out = self.conv18(out)
out = self.relu18(out)
out = self.reflecPad19(out)
out = self.conv19(out)
return out
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
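# Hedged usage sketch (assumption: CPU execution is fine for this reference
# module; each ConvTranspose2d stage doubles the spatial size, so the 4x4
# latent from get_inputs() decodes to a 3-channel 32x32 image).
if __name__ == '__main__':
    _dec = decoder6()
    print(_dec(*get_inputs()).shape)  # expected: torch.Size([4, 3, 32, 32])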
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_4(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (128, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_21, (64,), (1,))
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_25, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0,
73728, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(16384)](buf2, primals_3,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 8, 8), (16384, 64, 8, 1))
buf4 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_2[grid(102400)](buf3
, primals_5, buf4, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 8, 8), (16384, 64, 8, 1))
buf6 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_2[grid(102400)](buf5
, primals_7, buf6, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 256, 8, 8), (16384, 64, 8, 1))
buf8 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_2[grid(102400)](buf7
, primals_9, buf8, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 256, 8, 8), (16384, 64, 8, 1))
buf10 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_2[grid(102400)](buf9
, primals_11, buf10, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf11 = extern_kernels.convolution(buf10, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 128, 8, 8), (8192, 64, 8, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_3[grid(32768)](buf12, primals_13,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf13 = extern_kernels.convolution(buf12, primals_14, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_4[grid(165888)](
buf13, primals_15, buf14, 165888, XBLOCK=1024, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 128, 16, 16), (32768, 256, 16, 1))
buf16 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_4[grid(165888)](
buf15, primals_17, buf16, 165888, XBLOCK=1024, num_warps=4,
num_stages=1)
buf17 = extern_kernels.convolution(buf16, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 16, 16), (16384, 256, 16, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_5[grid(65536)](buf18, primals_19,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
buf19 = extern_kernels.convolution(buf18, primals_20, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf20 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(295936)](
buf19, primals_21, buf20, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
buf21 = extern_kernels.convolution(buf20, primals_22, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf22 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(295936)](
buf21, primals_23, buf22, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_7[grid(12288)](buf24, primals_25,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf25 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(262144)](
buf21, primals_23, buf25, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf21
del primals_23
buf26 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(262144)](
buf19, primals_21, buf26, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf19
del primals_21
buf27 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(131072)](
buf15, primals_17, buf27, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf15
del primals_17
buf28 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(131072)](
buf13, primals_15, buf28, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_15
buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(65536)](
buf9, primals_11, buf29, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf9
del primals_11
buf30 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(65536)](
buf7, primals_9, buf30, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf7
del primals_9
buf31 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(65536)](
buf5, primals_7, buf31, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf5
del primals_7
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(65536)](
buf3, primals_5, buf32, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf3
del primals_5
return (buf24, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, primals_18, primals_20,
primals_22, primals_24, buf0, buf2, buf4, buf6, buf8, buf10, buf12,
buf14, buf16, buf18, buf20, buf22, buf25, buf26, buf27, buf28,
buf29, buf30, buf31, buf32)
class decoder6New(nn.Module):
def __init__(self):
super(decoder6New, self).__init__()
self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(512, 256, 3, 1, 0)
self.relu11 = nn.ReLU(inplace=True)
self.unpool1 = nn.ConvTranspose2d(256, 256, 4, 2, 1)
self.act1 = nn.ReLU()
self.reflecPad12 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv12 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu12 = nn.ReLU(inplace=True)
self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv13 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu13 = nn.ReLU(inplace=True)
self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv14 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu14 = nn.ReLU(inplace=True)
self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv15 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu15 = nn.ReLU(inplace=True)
self.unpool2 = nn.ConvTranspose2d(128, 128, 4, 2, 1)
self.act2 = nn.ReLU()
self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv16 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu16 = nn.ReLU(inplace=True)
self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv17 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu17 = nn.ReLU(inplace=True)
self.unpool3 = nn.ConvTranspose2d(64, 64, 4, 2, 1)
self.act3 = nn.ReLU()
self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv18 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu18 = nn.ReLU(inplace=True)
self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv19 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, input_0):
primals_2 = self.conv11.weight
primals_3 = self.conv11.bias
primals_4 = self.unpool1.weight
primals_5 = self.unpool1.bias
primals_6 = self.conv12.weight
primals_7 = self.conv12.bias
primals_8 = self.conv13.weight
primals_9 = self.conv13.bias
primals_10 = self.conv14.weight
primals_11 = self.conv14.bias
primals_12 = self.conv15.weight
primals_13 = self.conv15.bias
primals_14 = self.unpool2.weight
primals_15 = self.unpool2.bias
primals_16 = self.conv16.weight
primals_17 = self.conv16.bias
primals_18 = self.conv17.weight
primals_19 = self.conv17.bias
primals_20 = self.unpool3.weight
primals_21 = self.unpool3.bias
primals_22 = self.conv18.weight
primals_23 = self.conv18.bias
primals_24 = self.conv19.weight
primals_25 = self.conv19.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25])
return output[0]
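# Rough reading of the lowered graph above (hedged; based only on the generated
# calls): every Conv2d/ConvTranspose2d goes through extern_kernels.convolution,
# while the Triton kernels fuse the cheap surrounding ops -- reflection padding,
# bias add and ReLU -- into single passes over the activations. The
# *_threshold_backward kernels only build boolean (activation <= 0) masks,
# which are returned with the image so the backward pass can reuse them instead
# of recomputing the ReLU thresholds.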
|
Holmes-Alan/RefVAE
|
decoder6
| false
| 8,304
|
[
"MIT"
] | 13
|
836b8f1168f1b0f923b609a48e202ace7806f79c
|
https://github.com/Holmes-Alan/RefVAE/tree/836b8f1168f1b0f923b609a48e202ace7806f79c
|
SP
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SP(nn.Module):
def __init__(self):
super(SP, self).__init__()
def forward(self, feat_v, feat_t):
feat_v = feat_v.view(feat_v.size(0), -1)
G_v = torch.mm(feat_v, feat_v.t())
norm_G_v = F.normalize(G_v, p=2, dim=1)
feat_t = feat_t.view(feat_t.size(0), -1)
G_t = torch.mm(feat_t, feat_t.t())
norm_G_t = F.normalize(G_t, p=2, dim=1)
loss = F.mse_loss(norm_G_v, norm_G_t)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
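# Hedged usage sketch (assumption: this matches the similarity-preserving
# distillation loss of Tung & Mori -- both inputs are flattened to (N, -1),
# turned into N x N Gram matrices, L2-normalised per row and compared with
# MSE -- so any two tensors sharing the batch size N work).
if __name__ == '__main__':
    _criterion = SP()
    print(float(_criterion(*get_inputs())))  # non-negative scalar loss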
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + r2, None)
tmp17 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 16.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (4, 64), (64, 1), 0),
reinterpret_tensor(arg0_1, (64, 4), (1, 64), 0), out=buf0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg1_1, (4, 64), (64, 1), 0),
reinterpret_tensor(arg1_1, (64, 4), (1, 64), 0), out=buf1)
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_div_mse_loss_0[grid(1)](buf4, buf0, buf1, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf4,
class SPNew(nn.Module):
def __init__(self):
super(SPNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
JDAI-CV/CM-NAS
|
SP
| false
| 8,305
|
[
"Apache-2.0"
] | 31
|
bbc77f427b2c8afb9f3865f5a04e86079d33dd28
|
https://github.com/JDAI-CV/CM-NAS/tree/bbc77f427b2c8afb9f3865f5a04e86079d33dd28
|
PSNR
|
import torch
import torch as th
import torch.utils.data
class PSNR(th.nn.Module):
def __init__(self):
super(PSNR, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, out, ref):
mse = self.mse(out, ref)
return -10 * th.log10(mse + 1e-12)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
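# Hedged usage sketch (assumption: inputs are expected in [0, 1], so the peak
# signal is 1 and PSNR reduces to -10 * log10(MSE); the 1e-12 term only guards
# against log10(0) when the inputs are identical, which gives ~120 dB).
if __name__ == '__main__':
    _psnr = PSNR()
    print(float(_psnr(*get_inputs())))  # roughly 8 dB for two random inputs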
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch as th
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1e-12
tmp10 = tmp8 + tmp9
tmp11 = libdevice.log10(tmp10)
tmp12 = -10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log10_mse_loss_mul_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PSNRNew(th.nn.Module):
def __init__(self):
super(PSNRNew, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
IlyaBizyaev/ttools
|
PSNR
| false
| 8,306
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
BicubicUpsampler
|
import torch
import torch as th
import torch.utils.data
class BicubicUpsampler(th.nn.Module):
def __init__(self, scale=2, channels=1):
super(BicubicUpsampler, self).__init__()
ksize = 2 * scale * 2
total_pad = ksize - scale // 2
if scale % 2 == 1:
ksize += 1
self.pad = th.nn.ReplicationPad2d((2, 2, 2, 2))
self.us_x = th.nn.ConvTranspose2d(channels, channels, (1, ksize),
stride=(1, scale), padding=(0, total_pad), groups=channels,
bias=False)
self.us_y = th.nn.ConvTranspose2d(channels, channels, (ksize, 1),
stride=(scale, 1), padding=(total_pad, 0), groups=channels,
bias=False)
k_idx = th.arange(0, ksize) + 0.5
k_coord = k_idx / scale - ksize * 0.5 / scale
absx = th.abs(k_coord)
absx2 = absx.pow(2)
absx3 = absx.pow(3)
k_weight = th.zeros(ksize)
k_weight += (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2.0) * ((absx >
1.0) & (absx < 2.0))
k_weight += (1.5 * absx3 - 2.5 * absx2 + 1.0) * (absx <= 1.0)
for c in range(channels):
self.us_x.weight.data[c, 0, 0, :].copy_(k_weight)
self.us_y.weight.data[c, 0, :, 0].copy_(k_weight)
for p in self.parameters():
p.requires_grad = False
def forward(self, x):
x = self.pad(x)
x = self.us_x(x)
x = self.us_y(x)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {}]
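# Hedged note (an observation about the constructor above): the two piecewise
# polynomials are the Keys cubic-convolution kernel with a = -0.5, sampled at
# sub-pixel offsets, so us_x/us_y realise a fixed, separable bicubic filter as
# depthwise transposed convolutions.
if __name__ == '__main__':
    _up = BicubicUpsampler(scale=2, channels=1)
    print(_up(*get_inputs()).shape)  # expected: torch.Size([4, 1, 8, 8])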
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch as th
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -2 + x1) + (-2 + x1) *
(-2 + x1 > 0)) + (0 * (0 >= -2 + x1) + (-2 + x1) * (-2 + x1 > 0)) *
(0 * (0 >= -2 + x1) + (-2 + x1) * (-2 + x1 > 0) < 3)) + 16 * x2 + (
3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >=
-2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 +
x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 1, 8), (8, 8, 8, 1))
assert_size_stride(arg2_1, (1, 1, 8, 1), (8, 8, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 8, 8), (64, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad2d_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 2),
padding=(0, 7), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 8, 8), (64, 64, 8, 1))
del arg1_1
del buf0
buf2 = extern_kernels.convolution(buf1, arg2_1, stride=(2, 1),
padding=(7, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 8, 8), (64, 64, 8, 1))
del arg2_1
del buf1
return buf2,
class BicubicUpsamplerNew(th.nn.Module):
def __init__(self, scale=2, channels=1):
super(BicubicUpsamplerNew, self).__init__()
ksize = 2 * scale * 2
total_pad = ksize - scale // 2
if scale % 2 == 1:
ksize += 1
self.pad = th.nn.ReplicationPad2d((2, 2, 2, 2))
self.us_x = th.nn.ConvTranspose2d(channels, channels, (1, ksize),
stride=(1, scale), padding=(0, total_pad), groups=channels,
bias=False)
self.us_y = th.nn.ConvTranspose2d(channels, channels, (ksize, 1),
stride=(scale, 1), padding=(total_pad, 0), groups=channels,
bias=False)
k_idx = th.arange(0, ksize) + 0.5
k_coord = k_idx / scale - ksize * 0.5 / scale
absx = th.abs(k_coord)
absx2 = absx.pow(2)
absx3 = absx.pow(3)
k_weight = th.zeros(ksize)
k_weight += (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2.0) * ((absx >
1.0) & (absx < 2.0))
k_weight += (1.5 * absx3 - 2.5 * absx2 + 1.0) * (absx <= 1.0)
for c in range(channels):
self.us_x.weight.data[c, 0, 0, :].copy_(k_weight)
self.us_y.weight.data[c, 0, :, 0].copy_(k_weight)
for p in self.parameters():
p.requires_grad = False
def forward(self, input_0):
arg1_1 = self.us_x.weight
arg2_1 = self.us_y.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
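# Rough reading of the lowered graph above (hedged): because requires_grad is
# disabled on the bicubic kernel weights, they arrive as plain arguments
# (arg1_1, arg2_1) rather than trainable primals. Only the replication pad is
# expressed as a Triton kernel; the two separable transposed convolutions are
# dispatched to extern_kernels.convolution unchanged.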
|
IlyaBizyaev/ttools
|
BicubicUpsampler
| false
| 8,307
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
FCChain
|
import torch
import torch.utils.data
import torch.nn as nn
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
class FCModule(nn.Module):
"""Basic fully connected module with optional dropout.
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
activation(str): nonlinear activation function.
dropout(float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, n_out, activation=None, dropout=None):
super(FCModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer'
self.add_module('fc', nn.Linear(n_in, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
if dropout is not None:
self.add_module('dropout', nn.Dropout(dropout, inplace=True))
_init_fc_or_conv(self.fc, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FCChain(nn.Module):
"""Linear chain of fully connected layers.
Args:
n_in(int): number of input channels.
width(int or list of int): number of feature channels in the intermediate layers.
depth(int): number of layers
activation(str): nonlinear activation function between the linear layers.
dropout(float or list of float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, width=64, depth=3, activation='relu', dropout=None
):
super(FCChain, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
) == depth, 'Specifying width with a list: should have `depth` entries'
_in = _in + width[:-1]
_out = width
_activations = [activation] * depth
if dropout is not None:
assert isinstance(dropout, float) or isinstance(dropout, list
), 'Dropout should be a float or a list of floats'
if dropout is None or isinstance(dropout, float):
_dropout = [dropout] * depth
elif isinstance(dropout, list):
assert len(dropout
) == depth, "When specifying a list of dropout, the list should have 'depth' elements."
_dropout = dropout
for lvl in range(depth):
self.add_module('fc{}'.format(lvl), FCModule(_in[lvl], _out[lvl
], activation=_activations[lvl], dropout=_dropout[lvl]))
def forward(self, x):
for m in self.children():
x = m(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
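# Hedged usage sketch (assumption: nn.Linear acts on the trailing dimension, so
# the default chain maps the feature size 4 -> 64 -> 64 -> 64 while leaving the
# leading (4, 4, 4) dimensions untouched).
if __name__ == '__main__':
    _chain = FCChain(**get_init_inputs()[1])
    print(_chain(*get_inputs()).shape)  # expected: torch.Size([4, 4, 4, 64])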
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, None)
tl.store(out_ptr0 + x4, tmp6, None)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 256 * (x1 % 4 // 4) + 1024 * (
(4 * (x1 // 4 % 4) + x1 % 4) // 16)), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_view_2(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x4, tmp4, None)
tl.store(out_ptr1 + x4, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
triton_poi_fused_view_1[grid(4096)](buf1, buf2, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 64), (64, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (64, 64), (1,
64), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf3
buf10 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf4,
primals_5, buf10, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
triton_poi_fused_view_1[grid(4096)](buf4, buf5, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 64), (64, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (64, 64), (1,
64), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.
float32)
buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_view_2[grid(4096)](buf7,
primals_7, buf8, buf9, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del primals_7
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, buf9, primals_6, buf10, primals_4, buf11
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
class FCModule(nn.Module):
"""Basic fully connected module with optional dropout.
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
activation(str): nonlinear activation function.
dropout(float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, n_out, activation=None, dropout=None):
super(FCModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer'
self.add_module('fc', nn.Linear(n_in, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
if dropout is not None:
self.add_module('dropout', nn.Dropout(dropout, inplace=True))
_init_fc_or_conv(self.fc, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FCChainNew(nn.Module):
"""Linear chain of fully connected layers.
Args:
n_in(int): number of input channels.
width(int or list of int): number of feature channels in the intermediate layers.
depth(int): number of layers
activation(str): nonlinear activation function between the linear layers.
dropout(float or list of float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, width=64, depth=3, activation='relu', dropout=None
):
super(FCChainNew, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
) == depth, 'Specifying width with a list: should have `depth` entries'
_in = _in + width[:-1]
_out = width
_activations = [activation] * depth
if dropout is not None:
assert isinstance(dropout, float) or isinstance(dropout, list
), 'Dropout should be a float or a list of floats'
if dropout is None or isinstance(dropout, float):
_dropout = [dropout] * depth
elif isinstance(dropout, list):
assert len(dropout
) == depth, "When specifying a list of dropout, the list should have 'depth' elements."
_dropout = dropout
for lvl in range(depth):
self.add_module('fc{}'.format(lvl), FCModule(_in[lvl], _out[lvl
], activation=_activations[lvl], dropout=_dropout[lvl]))
def forward(self, input_0):
primals_1 = self.fc0.fc.weight
primals_2 = self.fc0.fc.bias
primals_4 = self.fc1.fc.weight
primals_5 = self.fc1.fc.bias
primals_6 = self.fc2.fc.weight
primals_7 = self.fc2.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
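# Rough reading of the lowered graph above (hedged): each FC layer becomes an
# extern_kernels.mm over a 2-D (64, features) view of the activations, the
# fused relu_threshold_backward kernels add the bias, apply ReLU and stash the
# (activation <= 0) masks for backward, and the view kernels re-flatten the
# 4-D activations into matrices for the next matmul.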
|
IlyaBizyaev/ttools
|
FCChain
| false
| 8,308
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
TransformerEncoderPostNormLayer
|
import torch
import torch.nn.functional as F
from torch import nn
from typing import Optional
from torch.nn import LayerNorm
def _get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerEncoderPostNormLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.0,
activation='relu'):
super().__init__()
assert dropout == 0.0
self.self_attn = nn.MultiheadAttention(d_model, nhead)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super().__setstate__(state)
def forward(self, src, src_mask: 'Optional[torch.Tensor]'=None,
src_key_padding_mask: 'Optional[torch.Tensor]'=None):
norm_src = self.norm1(src)
src2 = self.self_attn(norm_src, norm_src, norm_src, attn_mask=
src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + src2
norm_src = self.norm2(src)
src2 = self.linear2(self.activation(self.linear1(norm_src)))
src = src + src2
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
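# Hedged usage sketch (assumption: a PyTorch version that accepts unbatched
# (L, E) inputs to nn.MultiheadAttention, since get_inputs() supplies a single
# (4, 4) tensor). Despite the "PostNorm" name, this forward normalises *before*
# the attention and feed-forward blocks, i.e. a pre-norm residual layout.
if __name__ == '__main__':
    _layer = TransformerEncoderPostNormLayer(d_model=4, nhead=4)
    print(_layer(*get_inputs()).shape)  # expected: torch.Size([4, 4])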
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
from torch import nn
from torch.nn import LayerNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (2048, 4), (4, 1))
assert_size_stride(primals_11, (2048,), (1,))
assert_size_stride(primals_12, (4, 2048), (2048, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_3, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 2048),
(1, 4), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(8192)](buf17, primals_11, 8192, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (2048, 4),
(1, 2048), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_add_9[grid(16)](buf19, primals_3, buf12,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf19, primals_3, primals_8, buf2, buf9, reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
def _get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerEncoderPostNormLayerNew(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.0,
activation='relu'):
super().__init__()
assert dropout == 0.0
self.self_attn = nn.MultiheadAttention(d_model, nhead)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super().__setstate__(state)
def forward(self, input_0):
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_3 = self.self_attn.out_proj.weight
primals_1 = self.self_attn.out_proj.bias
primals_10 = self.linear1.weight
primals_11 = self.linear1.bias
primals_12 = self.linear2.weight
primals_2 = self.linear2.bias
primals_7 = self.norm1.weight
primals_8 = self.norm1.bias
primals_9 = self.norm2.weight
primals_13 = self.norm2.bias
primals_6 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
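# Hedged usage sketch; `layer` and `x` are placeholder names introduced here, not taken
# from the source repo. The compiled wrapper expects a single (seq_len, d_model) CUDA
# tensor, mirroring the shapes asserted in call() above.
def _example_compiled_layer_usage():
    if not torch.cuda.is_available():
        return None
    layer = TransformerEncoderPostNormLayerNew(d_model=4, nhead=4).cuda()
    x = torch.rand(4, 4, device='cuda')
    return layer(x)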
| JDBumgardner/stone_ground_hearth_battles | TransformerEncoderPostNormLayer | false | 8,309 | ["Apache-2.0"] | 20 | 9fe095651fab60e8ddbf563f0b9b7f3e723d5f4f | https://github.com/JDBumgardner/stone_ground_hearth_battles/tree/9fe095651fab60e8ddbf563f0b9b7f3e723d5f4f |
ResidualAttentionBlock
|
import torch
from collections import OrderedDict
from torch import nn
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: 'torch.Tensor'):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: 'torch.Tensor'):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: 'int', n_head: 'int', attn_mask:
'torch.Tensor'=None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model,
d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear(
d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: 'torch.Tensor'):
self.attn_mask = self.attn_mask if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask
)[0]
def forward(self, x: 'torch.Tensor'):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'n_head': 4}]
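# Hedged illustrative check, not part of the original repo: QuickGELU above is the sigmoid
# approximation x * sigmoid(1.702 * x). Comparing it against the exact GELU on a small
# grid shows the two stay within a few hundredths of each other.
def _compare_quickgelu_to_exact_gelu():
    import torch.nn.functional as F
    x = torch.linspace(-3.0, 3.0, steps=13)
    approx = x * torch.sigmoid(1.702 * x)
    exact = F.gelu(x)
    return (approx - exact).abs().max()  # expected to be on the order of 1e-2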
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from collections import OrderedDict
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__safe_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__safe_softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 16), out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf4, (1, 4, 1, 4), (16, 1, 16, 4), 0)
del buf4
triton_poi_fused_mul_3[grid(16)](buf7, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 0), 0
), reinterpret_tensor(buf7, (4, 1, 4), (1, 0, 4), 0), out=buf8)
buf9 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__safe_softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__safe_softmax_5[grid(64)](buf8, buf9, buf10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32)
triton_poi_fused_clone_6[grid(4, 4)](buf11, buf12, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_7
buf14 = buf1
del buf1
buf15 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(4)](primals_1, buf13,
buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_1, buf13,
buf14, buf15, primals_8, primals_9, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf14
del buf15
del primals_9
buf17 = reinterpret_tensor(buf9, (4, 16), (16, 1), 0)
del buf9
extern_kernels.addmm(primals_11, buf16, reinterpret_tensor(
primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = reinterpret_tensor(buf8, (4, 16), (16, 1), 0)
del buf8
triton_poi_fused_mul_sigmoid_9[grid(64)](buf17, buf18, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf18, reinterpret_tensor(primals_12, (16, 4), (1,
16), 0), out=buf19)
buf20 = buf19
del buf19
triton_poi_fused_add_10[grid(16)](buf20, primals_1, buf13,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf20, primals_1, primals_8, buf2, buf10, reinterpret_tensor(
buf12, (4, 4), (4, 1), 0), buf13, buf16, buf17, buf18, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 4, 4), 0),
reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 16), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: 'torch.Tensor'):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: 'torch.Tensor'):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlockNew(nn.Module):
def __init__(self, d_model: 'int', n_head: 'int', attn_mask:
'torch.Tensor'=None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model,
d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear(
d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: 'torch.Tensor'):
self.attn_mask = self.attn_mask if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask
)[0]
def forward(self, input_0):
primals_4 = self.attn.in_proj_weight
primals_5 = self.attn.in_proj_bias
primals_1 = self.attn.out_proj.weight
primals_2 = self.attn.out_proj.bias
primals_3 = self.ln_1.weight
primals_7 = self.ln_1.bias
primals_10 = self.mlp.c_fc.weight
primals_11 = self.mlp.c_fc.bias
primals_12 = self.mlp.c_proj.weight
primals_8 = self.mlp.c_proj.bias
primals_9 = self.ln_2.weight
primals_13 = self.ln_2.bias
primals_6 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| Jack000/glid-3 | ResidualAttentionBlock | false | 8,310 | ["MIT"] | 31 | 4a18efc2785339ebc743e149a7955e34fff436fb | https://github.com/Jack000/glid-3/tree/4a18efc2785339ebc743e149a7955e34fff436fb |
StyleLossBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class StyleLossBlock(nn.Module):
def __init__(self, target: 'torch.Tensor'):
super().__init__()
self.stored_value = None
self._loss = F.mse_loss
self.shape = target.shape
self._target_gram_matrix = nn.Parameter(self.gram_matrix(target).data)
@staticmethod
def gram_matrix(x: 'torch.Tensor') ->torch.Tensor:
bs, ch, h, w = x.size()
f = x.view(bs, ch, w * h)
f_t = f.transpose(1, 2)
g = f.bmm(f_t) / (ch * h * w)
return g
def forward(self, input_tensor: 'torch.Tensor') ->torch.Tensor:
input_gram_matrix = self.gram_matrix(input_tensor)
result = self._loss(input_gram_matrix, self._target_gram_matrix)
return result
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'target': torch.rand([4, 4, 4, 4])}]
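# Hedged illustrative sketch; `feat` and `block` are placeholder names. The Gram matrix
# above is a (bs, ch, ch) feature-correlation matrix, and the loss is the MSE between the
# input's Gram matrix and the one stored from `target` at construction time.
def _style_loss_example():
    target = torch.rand(4, 4, 4, 4)
    block = StyleLossBlock(target)
    feat = torch.rand(4, 4, 4, 4)
    gram = StyleLossBlock.gram_matrix(feat)  # shape (4, 4, 4)
    return gram.shape, block(feat)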
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mse_loss_mse_loss_backward_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.015625
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp3 - tmp2
tmp10 = 0.03125
tmp11 = tmp9 * tmp10
tmp12 = 64.0
tmp13 = tmp8 / tmp12
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp11, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64,
16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1,
16), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_div_mse_loss_mse_loss_backward_0[grid(1)](buf3,
buf0, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del primals_2
return buf3, buf2
class StyleLossBlockNew(nn.Module):
def __init__(self, target: 'torch.Tensor'):
super().__init__()
self.stored_value = None
self._loss = F.mse_loss
self.shape = target.shape
self._target_gram_matrix = nn.Parameter(self.gram_matrix(target).data)
@staticmethod
def gram_matrix(x: 'torch.Tensor') ->torch.Tensor:
bs, ch, h, w = x.size()
f = x.view(bs, ch, w * h)
f_t = f.transpose(1, 2)
g = f.bmm(f_t) / (ch * h * w)
return g
def forward(self, input_0):
primals_2 = self._target_gram_matrix
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| Inkln/StyleTransferWithCatalyst | StyleLossBlock | false | 8,311 | ["Apache-2.0"] | 11 | c3181ecdfd32160907efc2d9d917a55925c25c11 | https://github.com/Inkln/StyleTransferWithCatalyst/tree/c3181ecdfd32160907efc2d9d917a55925c25c11 |
BilinearUpsampler
|
import torch
import torch as th
import torch.utils.data
class BilinearUpsampler(th.nn.Module):
def __init__(self, scale=2, channels=1):
super(BilinearUpsampler, self).__init__()
ksize = 2 * scale
total_pad = ksize - scale // 2
if scale % 2 == 1:
ksize += 1
self.pad = th.nn.ReplicationPad2d((1, 1, 1, 1))
self.us_x = th.nn.ConvTranspose2d(channels, channels, (1, ksize),
stride=(1, scale), padding=(0, total_pad), groups=channels,
bias=False)
self.us_y = th.nn.ConvTranspose2d(channels, channels, (ksize, 1),
stride=(scale, 1), padding=(total_pad, 0), groups=channels,
bias=False)
k_idx = th.arange(0, ksize) + 0.5
k_coord = k_idx / scale - ksize * 0.5 / scale
k_weight = th.clamp(1.0 - th.abs(k_coord), min=0)
for c in range(channels):
self.us_x.weight.data[c, 0, 0, :].copy_(k_weight)
self.us_y.weight.data[c, 0, :, 0].copy_(k_weight)
for p in self.parameters():
p.requires_grad = False
def forward(self, x):
x = self.pad(x)
x = self.us_x(x)
x = self.us_y(x)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {}]
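# Hedged worked example, not from the source repo: for scale=2 the constructor above
# builds a length-4 tent filter. Recomputing it the same way gives [0.25, 0.75, 0.75,
# 0.25], i.e. the separable bilinear interpolation kernel applied along each axis.
def _bilinear_kernel_weights(scale=2):
    ksize = 2 * scale
    if scale % 2 == 1:
        ksize += 1
    k_idx = th.arange(0, ksize) + 0.5
    k_coord = k_idx / scale - ksize * 0.5 / scale
    return th.clamp(1.0 - th.abs(k_coord), min=0)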
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch as th
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -1 + x1) + (-1 + x1) *
(-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) *
(0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 3)) + 16 * x2 + (
3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >=
-1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 +
x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 1, 4), (4, 4, 4, 1))
assert_size_stride(arg2_1, (1, 1, 4, 1), (4, 4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad2d_0[grid(144)](arg0_1, buf0, 144,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 2),
padding=(0, 3), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 6, 8), (48, 48, 8, 1))
del arg1_1
del buf0
buf2 = extern_kernels.convolution(buf1, arg2_1, stride=(2, 1),
padding=(3, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 8, 8), (64, 64, 8, 1))
del arg2_1
del buf1
return buf2,
class BilinearUpsamplerNew(th.nn.Module):
def __init__(self, scale=2, channels=1):
super(BilinearUpsamplerNew, self).__init__()
ksize = 2 * scale
total_pad = ksize - scale // 2
if scale % 2 == 1:
ksize += 1
self.pad = th.nn.ReplicationPad2d((1, 1, 1, 1))
self.us_x = th.nn.ConvTranspose2d(channels, channels, (1, ksize),
stride=(1, scale), padding=(0, total_pad), groups=channels,
bias=False)
self.us_y = th.nn.ConvTranspose2d(channels, channels, (ksize, 1),
stride=(scale, 1), padding=(total_pad, 0), groups=channels,
bias=False)
k_idx = th.arange(0, ksize) + 0.5
k_coord = k_idx / scale - ksize * 0.5 / scale
k_weight = th.clamp(1.0 - th.abs(k_coord), min=0)
for c in range(channels):
self.us_x.weight.data[c, 0, 0, :].copy_(k_weight)
self.us_y.weight.data[c, 0, :, 0].copy_(k_weight)
for p in self.parameters():
p.requires_grad = False
def forward(self, input_0):
arg1_1 = self.us_x.weight
arg2_1 = self.us_y.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| IlyaBizyaev/ttools | BilinearUpsampler | false | 8,312 | ["MIT"] | 11 | b1435b19f397ce1baff9daed3cb287e52a029fdb | https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb |
Conv1dResBlock
|
import torch
import torch.nn as nn
class Conv1d(nn.Conv1d):
"""
Convolution 1d
Args:
x: (N, T, C_in)
Returns:
y: (N, T, C_out)
"""
def __init__(self, in_channels, out_channels, kernel_size,
activation_fn=None, drop_rate=0.0, stride=1, padding='same',
dilation=1, groups=1, bias=True, ln=False):
if padding == 'same':
padding = kernel_size // 2 * dilation
self.even_kernel = not bool(kernel_size % 2)
super(Conv1d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias)
self.activation_fn = activation_fn(inplace=True
) if activation_fn is not None else None
self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None
self.layer_norm = nn.LayerNorm(out_channels) if ln else None
def forward(self, x):
y = x.transpose(1, 2)
y = super(Conv1d, self).forward(y)
y = y.transpose(1, 2)
y = self.layer_norm(y) if self.layer_norm is not None else y
y = self.activation_fn(y) if self.activation_fn is not None else y
y = self.drop_out(y) if self.drop_out is not None else y
y = y[:, :-1, :] if self.even_kernel else y
return y
class Conv1dResBlock(Conv1d):
"""
Convolution 1d with Residual connection
Args:
x: (N, T, C_in)
Returns:
y: (N, T, C_out)
"""
def __init__(self, in_channels, out_channels, kernel_size,
activation_fn=None, drop_rate=0.0, stride=1, padding='same',
dilation=1, groups=1, bias=True, ln=False):
super(Conv1dResBlock, self).__init__(in_channels, out_channels,
kernel_size, activation_fn, drop_rate, stride, padding,
dilation, groups=groups, bias=bias, ln=ln)
def forward(self, x):
residual = x
x = super(Conv1dResBlock, self).forward(x)
x = x + residual
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
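# Hedged usage sketch; `block` and `x` are placeholder names. The residual block keeps the
# (N, T, C) layout, and the even-kernel trim in Conv1d.forward keeps T unchanged so the
# skip connection lines up with the input.
def _conv1d_resblock_shape_check():
    block = Conv1dResBlock(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4)  # (N, T, C)
    y = block(x)
    return y.shape  # expected torch.Size([4, 4, 4])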
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 5 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(2,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5), (20, 5, 1))
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0)
del buf0
triton_poi_fused_add_1[grid(16, 4)](buf1, primals_3, primals_1,
buf2, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del buf1
del primals_3
return buf2, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1,
4), 0)
class Conv1d(nn.Conv1d):
"""
Convolution 1d
Args:
x: (N, T, C_in)
Returns:
y: (N, T, C_out)
"""
def __init__(self, in_channels, out_channels, kernel_size,
activation_fn=None, drop_rate=0.0, stride=1, padding='same',
dilation=1, groups=1, bias=True, ln=False):
if padding == 'same':
padding = kernel_size // 2 * dilation
self.even_kernel = not bool(kernel_size % 2)
super(Conv1d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias)
self.activation_fn = activation_fn(inplace=True
) if activation_fn is not None else None
self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None
self.layer_norm = nn.LayerNorm(out_channels) if ln else None
def forward(self, x):
y = x.transpose(1, 2)
y = super(Conv1d, self).forward(y)
y = y.transpose(1, 2)
y = self.layer_norm(y) if self.layer_norm is not None else y
y = self.activation_fn(y) if self.activation_fn is not None else y
y = self.drop_out(y) if self.drop_out is not None else y
y = y[:, :-1, :] if self.even_kernel else y
return y
class Conv1dResBlockNew(Conv1d):
"""
Convolution 1d with Residual connection
Args:
x: (N, T, C_in)
Returns:
y: (N, T, C_out)
"""
def __init__(self, in_channels, out_channels, kernel_size,
activation_fn=None, drop_rate=0.0, stride=1, padding='same',
dilation=1, groups=1, bias=True, ln=False):
super(Conv1dResBlockNew, self).__init__(in_channels, out_channels,
kernel_size, activation_fn, drop_rate, stride, padding,
dilation, groups=groups, bias=bias, ln=ln)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Jackson-Kang/VQVC-Pytorch | Conv1dResBlock | false | 8,313 | ["MIT"] | 13 | d2267b5c52253b6ae11a5767963a65320ae335c2 | https://github.com/Jackson-Kang/VQVC-Pytorch/tree/d2267b5c52253b6ae11a5767963a65320ae335c2 |
DepthwiseSeparableConv
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class DepthwiseSeparableConv(nn.Module):
"""
    Depth-wise separable convolution uses fewer parameters than a standard convolution to generate its output.
:Examples:
>>> m = DepthwiseSeparableConv(300, 200, 5, dim=1)
>>> input_tensor = torch.randn(32, 300, 20)
>>> output = m(input_tensor)
"""
def __init__(self, in_ch, out_ch, k, dim=1, relu=True):
"""
:param in_ch: input hidden dimension size
:param out_ch: output hidden dimension size
:param k: kernel size
:param dim: default 1. 1D conv or 2D conv
"""
super(DepthwiseSeparableConv, self).__init__()
self.relu = relu
if dim == 1:
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
elif dim == 2:
self.depthwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
else:
raise Exception('Incorrect dimension!')
def forward(self, x):
"""
:Input: (N, L_in, D)
:Output: (N, L_out, D)
"""
x = x.transpose(1, 2)
if self.relu:
out = F.relu(self.pointwise_conv(self.depthwise_conv(x)),
inplace=True)
else:
out = self.pointwise_conv(self.depthwise_conv(x))
return out.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4, 'k': 4}]
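# Hedged illustrative comparison, not from the source repo: the "fewer parameters" claim
# in the docstring can be checked directly -- a depthwise + pointwise pair is much smaller
# than a dense Conv1d with the same channel counts and kernel size.
def _param_count_comparison(in_ch=300, out_ch=200, k=5):
    separable = DepthwiseSeparableConv(in_ch, out_ch, k, dim=1)
    dense = nn.Conv1d(in_ch, out_ch, k, padding=k // 2)
    count = lambda m: sum(p.numel() for p in m.parameters())
    return count(separable), count(dense)  # roughly 62k vs. 300k parameters here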
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 5 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 5 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(2,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 5), (20, 5, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(80)](buf2, primals_3, 80,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 5), (20, 5, 1))
buf4 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(80)](buf3,
primals_5, buf4, buf5, 80, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
return reinterpret_tensor(buf5, (4, 5, 4), (20, 1, 5), 0
), primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (
16, 1, 4), 0), buf2, buf4
class DepthwiseSeparableConvNew(nn.Module):
"""
    Depth-wise separable convolution uses fewer parameters than a standard convolution to generate its output.
:Examples:
>>> m = DepthwiseSeparableConv(300, 200, 5, dim=1)
>>> input_tensor = torch.randn(32, 300, 20)
>>> output = m(input_tensor)
"""
def __init__(self, in_ch, out_ch, k, dim=1, relu=True):
"""
:param in_ch: input hidden dimension size
:param out_ch: output hidden dimension size
:param k: kernel size
:param dim: default 1. 1D conv or 2D conv
"""
super(DepthwiseSeparableConvNew, self).__init__()
self.relu = relu
if dim == 1:
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
elif dim == 2:
self.depthwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
else:
raise Exception('Incorrect dimension!')
def forward(self, input_0):
primals_2 = self.depthwise_conv.weight
primals_3 = self.depthwise_conv.bias
primals_4 = self.pointwise_conv.weight
primals_5 = self.pointwise_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| IsaacChanghau/ReLoCLNet | DepthwiseSeparableConv | false | 8,314 | ["MIT"] | 31 | 56cb666ce516cce9acbcfce78fb4e95d81e11e54 | https://github.com/IsaacChanghau/ReLoCLNet/tree/56cb666ce516cce9acbcfce78fb4e95d81e11e54 |
NormalDivLoss
|
import torch
import torch.nn as nn
def fuzzyDist(x, a=0.1, b=2):
return 1 / (1 + (x / a).abs().pow(2 * b))
class SoftHist(nn.Module):
def __init__(self, bins, dist):
super(SoftHist, self).__init__()
bins[1] - bins[0]
self.bins = nn.Parameter(bins.unsqueeze(1))
self.dist = dist
self.pdf = lambda h: h / h.sum()
def forward(self, x):
diffs = x.squeeze() - self.bins
distances = self.dist(diffs)
hist = distances.sum(1)
hist_norm = self.pdf(hist)
return hist_norm
class NormalDivLoss(nn.Module):
def __init__(self, dist=fuzzyDist):
super(NormalDivLoss, self).__init__()
bins = torch.arange(-10, 10, 0.2)
binwidth = bins[1] - bins[0]
self.hist = SoftHist(bins, dist)
self.kl = nn.KLDivLoss(reduction='batchmean')
self.target = nn.Parameter(binwidth * torch.distributions.normal.
Normal(0, 0.3).log_prob(bins).exp().unsqueeze(1))
def forward(self, x):
hist = self.hist(x)
hist_log = torch.log(hist).unsqueeze(1)
return self.kl(hist_log, self.target)
def get_inputs():
return [torch.rand([4, 4, 100, 4])]
def get_init_inputs():
return [[], {}]
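# Hedged illustrative sketch; `samples` is a placeholder name. fuzzyDist above is a smooth
# bump centred at zero, so SoftHist replaces hard bin counting with a differentiable sum
# of bump responses that is then normalised into a pdf.
def _soft_histogram_example():
    hist_module = SoftHist(torch.arange(-10, 10, 0.2), fuzzyDist)
    samples = torch.randn(1000) * 0.3
    hist = hist_module(samples)
    return hist.shape, float(hist.sum())  # (100,) and a sum of 1.0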
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_red_fused_abs_add_div_mul_pow_reciprocal_sub_sum_xlogy_0(in_out_ptr0
, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp44 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex // 400
r4 = rindex % 400
r1 = rindex // 4 % 100
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r4 + 1600 * r2), rmask, eviction_policy=
'evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last',
other=0.0)
tmp13 = tl.load(in_ptr0 + (400 + r4 + 1600 * r2), rmask,
eviction_policy='evict_first', other=0.0)
tmp23 = tl.load(in_ptr0 + (800 + r4 + 1600 * r2), rmask,
eviction_policy='evict_first', other=0.0)
tmp33 = tl.load(in_ptr0 + (1200 + r4 + 1600 * r2), rmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = 10.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.abs(tmp4)
tmp6 = tmp5 * tmp5
tmp7 = tmp6 * tmp6
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tl.full([1, 1], 1, tl.int32)
tmp11 = tmp10 / tmp9
tmp12 = tmp11 * tmp8
tmp14 = tmp13 - tmp1
tmp15 = tmp14 * tmp3
tmp16 = tl_math.abs(tmp15)
tmp17 = tmp16 * tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp18 + tmp8
tmp20 = tmp10 / tmp19
tmp21 = tmp20 * tmp8
tmp22 = tmp12 + tmp21
tmp24 = tmp23 - tmp1
tmp25 = tmp24 * tmp3
tmp26 = tl_math.abs(tmp25)
tmp27 = tmp26 * tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp28 + tmp8
tmp30 = tmp10 / tmp29
tmp31 = tmp30 * tmp8
tmp32 = tmp22 + tmp31
tmp34 = tmp33 - tmp1
tmp35 = tmp34 * tmp3
tmp36 = tl_math.abs(tmp35)
tmp37 = tmp36 * tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp38 + tmp8
tmp40 = tmp10 / tmp39
tmp41 = tmp40 * tmp8
tmp42 = tmp32 + tmp41
tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp45 = _tmp44 + tmp43
_tmp44 = tl.where(rmask, tmp45, _tmp44)
tl.store(out_ptr0 + tl.broadcast_to(r3, [XBLOCK, RBLOCK]), tmp42, rmask
)
tmp44 = tl.sum(_tmp44, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp44, None)
_tmp61 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex // 4 % 100
r3 = rindex
tmp46 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last',
other=0.0)
tmp55 = tl.load(out_ptr0 + r3, rmask, eviction_policy='evict_first',
other=0.0)
tmp47 = libdevice.isnan(tmp46).to(tl.int1)
tmp48 = 0.0
tmp49 = tmp46 == tmp48
tmp50 = tl_math.log(tmp46)
tmp51 = tmp46 * tmp50
tmp52 = tl.where(tmp49, tmp48, tmp51)
tmp53 = float('nan')
tmp54 = tl.where(tmp47, tmp53, tmp52)
tmp56 = tmp55 / tmp44
tmp57 = tl_math.log(tmp56)
tmp58 = tmp46 * tmp57
tmp59 = tmp54 - tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp62 = _tmp61 + tmp60
_tmp61 = tl.where(rmask, tmp62, _tmp61)
tmp61 = tl.sum(_tmp61, 1)[:, None]
tmp63 = 0.25
tmp64 = tmp61 * tmp63
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp64, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 100, 4), (1600, 400, 4, 1))
assert_size_stride(primals_2, (100, 1), (1, 1))
assert_size_stride(primals_3, (100, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 100, 4), (400, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_red_fused_abs_add_div_mul_pow_reciprocal_sub_sum_xlogy_0[grid(1)
](buf3, primals_1, primals_2, primals_3, buf0, buf1, 1, 1600,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del buf0
return buf3, primals_1, primals_2, primals_3, buf1
def fuzzyDist(x, a=0.1, b=2):
return 1 / (1 + (x / a).abs().pow(2 * b))
class SoftHist(nn.Module):
def __init__(self, bins, dist):
super(SoftHist, self).__init__()
bins[1] - bins[0]
self.bins = nn.Parameter(bins.unsqueeze(1))
self.dist = dist
self.pdf = lambda h: h / h.sum()
def forward(self, x):
diffs = x.squeeze() - self.bins
distances = self.dist(diffs)
hist = distances.sum(1)
hist_norm = self.pdf(hist)
return hist_norm
class NormalDivLossNew(nn.Module):
def __init__(self, dist=fuzzyDist):
super(NormalDivLossNew, self).__init__()
bins = torch.arange(-10, 10, 0.2)
binwidth = bins[1] - bins[0]
self.hist = SoftHist(bins, dist)
self.kl = nn.KLDivLoss(reduction='batchmean')
self.target = nn.Parameter(binwidth * torch.distributions.normal.
Normal(0, 0.3).log_prob(bins).exp().unsqueeze(1))
def forward(self, input_0):
primals_2 = self.target
primals_3 = self.hist.bins
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| JWHan717/CS492I-Project | NormalDivLoss | false | 8,315 | ["MIT"] | 23 | 5da80bc41425ee90711a3de89c5501b5f7acd4b7 | https://github.com/JWHan717/CS492I-Project/tree/5da80bc41425ee90711a3de89c5501b5f7acd4b7 |
ConvEncoder
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class DepthwiseSeparableConv(nn.Module):
"""
    Depth-wise separable convolution uses fewer parameters than a standard convolution to generate its output.
:Examples:
>>> m = DepthwiseSeparableConv(300, 200, 5, dim=1)
>>> input_tensor = torch.randn(32, 300, 20)
>>> output = m(input_tensor)
"""
def __init__(self, in_ch, out_ch, k, dim=1, relu=True):
"""
:param in_ch: input hidden dimension size
:param out_ch: output hidden dimension size
:param k: kernel size
:param dim: default 1. 1D conv or 2D conv
"""
super(DepthwiseSeparableConv, self).__init__()
self.relu = relu
if dim == 1:
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
elif dim == 2:
self.depthwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
else:
raise Exception('Incorrect dimension!')
def forward(self, x):
"""
:Input: (N, L_in, D)
:Output: (N, L_out, D)
"""
x = x.transpose(1, 2)
if self.relu:
out = F.relu(self.pointwise_conv(self.depthwise_conv(x)),
inplace=True)
else:
out = self.pointwise_conv(self.depthwise_conv(x))
return out.transpose(1, 2)
class ConvEncoder(nn.Module):
def __init__(self, kernel_size=7, n_filters=128, dropout=0.1):
super(ConvEncoder, self).__init__()
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(n_filters)
self.conv = DepthwiseSeparableConv(in_ch=n_filters, out_ch=
n_filters, k=kernel_size, relu=True)
def forward(self, x):
"""
:param x: (N, L, D)
:return: (N, L, D)
"""
return self.layer_norm(self.dropout(self.conv(x)) + x)
def get_inputs():
return [torch.rand([4, 128, 128])]
def get_init_inputs():
return [[], {}]
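# Hedged usage sketch; `enc` and `x` are placeholder names. The encoder is shape
# preserving: a depthwise-separable conv with a residual connection, followed by LayerNorm
# over the filter dimension.
def _conv_encoder_shape_check():
    enc = ConvEncoder(kernel_size=7, n_filters=128, dropout=0.1)
    x = torch.rand(4, 128, 128)  # (N, L, D)
    return enc(x).shape  # expected torch.Size([4, 128, 128])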
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 16384 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 128 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 128 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_red_fused_add_native_layer_norm_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 128
x1 = xindex // 128
x3 = xindex
tmp6_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 16384 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r2 + 128 * x3), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6_mean_next, tmp6_m2_next, tmp6_weight_next = (triton_helpers.
welford_reduce(tmp5, tmp6_mean, tmp6_m2, tmp6_weight, roffset == 0)
)
tmp6_mean = tl.where(rmask & xmask, tmp6_mean_next, tmp6_mean)
tmp6_m2 = tl.where(rmask & xmask, tmp6_m2_next, tmp6_m2)
tmp6_weight = tl.where(rmask & xmask, tmp6_weight_next, tmp6_weight)
tmp6_tmp, tmp7_tmp, tmp8_tmp = triton_helpers.welford(tmp6_mean,
tmp6_m2, tmp6_weight, 1)
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tmp8_tmp[:, None]
tl.store(out_ptr0 + x3, tmp6, xmask)
tmp9 = 128.0
tmp10 = tmp7 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp13, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp14 = tl.load(in_ptr0 + (x0 + 128 * r2 + 16384 * x1), rmask &
xmask, eviction_policy='evict_first', other=0.0)
tmp17 = tl.load(in_ptr1 + (r2 + 128 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp23 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp15 = tl.full([1, 1], 0, tl.int32)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = tmp16 + tmp17
tmp19 = tmp18 - tmp6
tmp20 = tmp19 * tmp13
tmp22 = tmp20 * tmp21
tmp24 = tmp22 + tmp23
tl.store(out_ptr1 + (r2 + 128 * x3), tmp24, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 128, 128), (16384, 128, 1))
assert_size_stride(primals_2, (128, 1, 7), (7, 7, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (128, 128, 1), (128, 1, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128,), (1,))
assert_size_stride(primals_7, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128, 128), (16384, 128, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(512, 128)](primals_1, buf0, 512,
128, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=128, bias=None)
assert_size_stride(buf1, (4, 128, 128), (16384, 128, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(65536)](buf2, primals_3, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 128), (16384, 128, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(65536)](buf4, primals_5, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 128, 1), (128, 1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 128, 1), (128, 1, 512), torch.float32)
buf8 = reinterpret_tensor(buf6, (4, 128, 1), (128, 1, 1), 0)
del buf6
buf9 = buf0
del buf0
triton_red_fused_add_native_layer_norm_2[grid(512)](buf8, buf4,
primals_1, primals_6, primals_7, buf5, buf9, 512, 128, XBLOCK=
64, RBLOCK=8, num_warps=4, num_stages=1)
del primals_7
return (buf9, primals_1, primals_2, primals_4, primals_6, buf2, buf4,
buf5, buf8)
class DepthwiseSeparableConv(nn.Module):
"""
    Depth-wise separable convolution uses fewer parameters than a standard convolution to produce its output.
:Examples:
>>> m = DepthwiseSeparableConv(300, 200, 5, dim=1)
>>> input_tensor = torch.randn(32, 300, 20)
>>> output = m(input_tensor)
"""
def __init__(self, in_ch, out_ch, k, dim=1, relu=True):
"""
:param in_ch: input hidden dimension size
:param out_ch: output hidden dimension size
:param k: kernel size
:param dim: default 1. 1D conv or 2D conv
"""
super(DepthwiseSeparableConv, self).__init__()
self.relu = relu
if dim == 1:
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
elif dim == 2:
self.depthwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=in_ch, kernel_size=k, groups=in_ch, padding=k // 2)
self.pointwise_conv = nn.Conv2d(in_channels=in_ch, out_channels
=out_ch, kernel_size=1, padding=0)
else:
raise Exception('Incorrect dimension!')
def forward(self, x):
"""
:Input: (N, L_in, D)
:Output: (N, L_out, D)
"""
x = x.transpose(1, 2)
if self.relu:
out = F.relu(self.pointwise_conv(self.depthwise_conv(x)),
inplace=True)
else:
out = self.pointwise_conv(self.depthwise_conv(x))
return out.transpose(1, 2)
class ConvEncoderNew(nn.Module):
def __init__(self, kernel_size=7, n_filters=128, dropout=0.1):
super(ConvEncoderNew, self).__init__()
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(n_filters)
self.conv = DepthwiseSeparableConv(in_ch=n_filters, out_ch=
n_filters, k=kernel_size, relu=True)
def forward(self, input_0):
primals_3 = self.layer_norm.weight
primals_5 = self.layer_norm.bias
primals_2 = self.conv.depthwise_conv.weight
primals_6 = self.conv.depthwise_conv.bias
primals_4 = self.conv.pointwise_conv.weight
primals_7 = self.conv.pointwise_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
IsaacChanghau/ReLoCLNet
|
ConvEncoder
| false
| 8,316
|
[
"MIT"
] | 31
|
56cb666ce516cce9acbcfce78fb4e95d81e11e54
|
https://github.com/IsaacChanghau/ReLoCLNet/tree/56cb666ce516cce9acbcfce78fb4e95d81e11e54
|
ConvChain
|
import torch
import torch.utils.data
import torch.nn as nn
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class ConvChain(nn.Module):
"""Linear chain of convolution layers.
Args:
n_in(int): number of input channels.
ksize(int or list of int): size of the convolution kernel (square).
width(int or list of int): number of features channels in the intermediate layers.
depth(int): number of layers
strides(list of int): stride between kernels. If None, defaults to 1 for all.
pad(bool): if True, zero pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, ksize=3, width=64, depth=3, strides=None, pad=
True, padding_mode='zero', activation='relu', norm_layer=None):
super(ConvChain, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if strides is None:
_strides = [1] * depth
else:
assert isinstance(strides, list), 'strides should be a list'
assert len(strides
) == depth, 'strides should have `depth` elements'
_strides = strides
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
) == depth, 'Specifying width with a list should have `depth` elements'
_in = _in + width[:-1]
_out = width
if isinstance(ksize, int):
_ksizes = [ksize] * depth
elif isinstance(ksize, list):
assert len(ksize
) == depth, "kernel size list should have 'depth' entries"
_ksizes = ksize
_activations = [activation] * depth
_norms = [norm_layer] * depth
for lvl in range(depth):
self.add_module('conv{}'.format(lvl), ConvModule(_in[lvl], _out
[lvl], _ksizes[lvl], stride=_strides[lvl], pad=pad,
activation=_activations[lvl], norm_layer=_norms[lvl]))
def forward(self, x):
for m in self.children():
x = m(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
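# Usage sketch (illustrative only, not from the original repo): builds a small
# ConvChain and checks that zero padding keeps the spatial size constant.
# The width/depth values below are assumptions chosen for the example.
def _convchain_example():
    torch.manual_seed(0)
    model = ConvChain(n_in=4, ksize=3, width=8, depth=2, activation='relu')
    x = torch.rand([4, 4, 4, 4])      # (N, C_in, H, W)
    y = model(x)
    assert y.shape == (4, 8, 4, 4)    # pad=True preserves H and W
    return y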
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(4096)](buf1, primals_3,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(4096)](buf3, primals_5,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(4096)](buf5
, primals_7, buf6, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_1, primals_2, primals_4, primals_6, buf1, buf3, buf6
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class ConvChainNew(nn.Module):
"""Linear chain of convolution layers.
Args:
n_in(int): number of input channels.
ksize(int or list of int): size of the convolution kernel (square).
width(int or list of int): number of features channels in the intermediate layers.
depth(int): number of layers
strides(list of int): stride between kernels. If None, defaults to 1 for all.
pad(bool): if True, zero pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, ksize=3, width=64, depth=3, strides=None, pad=
True, padding_mode='zero', activation='relu', norm_layer=None):
super(ConvChainNew, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if strides is None:
_strides = [1] * depth
else:
assert isinstance(strides, list), 'strides should be a list'
assert len(strides
) == depth, 'strides should have `depth` elements'
_strides = strides
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
) == depth, 'Specifying width with a list should have `depth` elements'
_in = _in + width[:-1]
_out = width
if isinstance(ksize, int):
_ksizes = [ksize] * depth
elif isinstance(ksize, list):
assert len(ksize
) == depth, "kernel size list should have 'depth' entries"
_ksizes = ksize
_activations = [activation] * depth
_norms = [norm_layer] * depth
for lvl in range(depth):
self.add_module('conv{}'.format(lvl), ConvModule(_in[lvl], _out
[lvl], _ksizes[lvl], stride=_strides[lvl], pad=pad,
activation=_activations[lvl], norm_layer=_norms[lvl]))
def forward(self, input_0):
primals_2 = self.conv0.conv.weight
primals_3 = self.conv0.conv.bias
primals_4 = self.conv1.conv.weight
primals_5 = self.conv1.conv.bias
primals_6 = self.conv2.conv.weight
primals_7 = self.conv2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
IlyaBizyaev/ttools
|
ConvChain
| false
| 8,317
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
Conv1d
|
import torch
import torch.nn as nn
class Conv1d(nn.Conv1d):
"""
Convolution 1d
Args:
x: (N, T, C_in)
Returns:
y: (N, T, C_out)
"""
def __init__(self, in_channels, out_channels, kernel_size,
activation_fn=None, drop_rate=0.0, stride=1, padding='same',
dilation=1, groups=1, bias=True, ln=False):
if padding == 'same':
padding = kernel_size // 2 * dilation
self.even_kernel = not bool(kernel_size % 2)
super(Conv1d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias)
self.activation_fn = activation_fn(inplace=True
) if activation_fn is not None else None
self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None
self.layer_norm = nn.LayerNorm(out_channels) if ln else None
def forward(self, x):
y = x.transpose(1, 2)
y = super(Conv1d, self).forward(y)
y = y.transpose(1, 2)
y = self.layer_norm(y) if self.layer_norm is not None else y
y = self.activation_fn(y) if self.activation_fn is not None else y
y = self.drop_out(y) if self.drop_out is not None else y
y = y[:, :-1, :] if self.even_kernel else y
return y
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
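# Usage sketch (illustrative only, not from the original repo): demonstrates
# the (N, T, C_in) -> (N, T, C_out) contract of Conv1d above, including the
# trailing-frame trim applied for even kernel sizes. Sizes mirror get_inputs().
def _conv1d_example():
    torch.manual_seed(0)
    layer = Conv1d(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand([4, 4, 4])         # (N, T, C_in)
    y = layer(x)
    assert y.shape == (4, 4, 4)       # even kernel: last frame is dropped
    return y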
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 5 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(2,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5), (20, 5, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(80)](buf2, primals_3, 80,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (20, 1, 5), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class Conv1dNew(nn.Conv1d):
"""
Convolution 1d
Args:
x: (N, T, C_in)
Returns:
y: (N, T, C_out)
"""
def __init__(self, in_channels, out_channels, kernel_size,
activation_fn=None, drop_rate=0.0, stride=1, padding='same',
dilation=1, groups=1, bias=True, ln=False):
if padding == 'same':
padding = kernel_size // 2 * dilation
self.even_kernel = not bool(kernel_size % 2)
super(Conv1dNew, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.activation_fn = activation_fn(inplace=True
) if activation_fn is not None else None
self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None
self.layer_norm = nn.LayerNorm(out_channels) if ln else None
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Jackson-Kang/VQVC-Pytorch
|
Conv1d
| false
| 8,318
|
[
"MIT"
] | 13
|
d2267b5c52253b6ae11a5767963a65320ae335c2
|
https://github.com/Jackson-Kang/VQVC-Pytorch/tree/d2267b5c52253b6ae11a5767963a65320ae335c2
|
KLLoss
|
import torch
from torch import nn
import torch.nn.functional as F
from math import sqrt as sqrt
from itertools import product as product
class KLLoss(nn.Module):
"""
    KL loss function for bounding box regression from the CVPR 2019 paper:
Bounding Box Regression with Uncertainty for Accurate Object Detection
by Yihui He, Chenchen Zhu, Jianren Wang. Marios Savvides, Xiangyu Zhang
It is a replacement for the Smooth L1 loss often used in bounding box regression.
    The regression loss for a coordinate depends on whether |xg − xe| ≤ 1.
    If |xg − xe| ≤ 1, the loss is:
        Lreg1 ∝ e^{−α} * 1/2(xg − xe)^2 + 1/2α
    and if |xg − xe| > 1, the loss is:
        Lreg2 = e^{−α} (|xg − xe| − 1/2) + 1/2α
PyTorch implementation by Jasper Bakker (JappaB @github)
"""
def __init__(self, loc_loss_weight=1.0):
super(KLLoss, self).__init__()
self.loc_loss_weight = loc_loss_weight
def forward(self, xg, xe, alpha):
"""
:param xg: The ground truth of the bounding box coordinates in x1y1x2y2 format
shape: [number_of_boxes, 4]
:param xe: The estimated bounding box coordinates in x1y1x2y2 format
shape: [number_of_boxes, 4]
:param alpha: The log(sigma^2) of the bounding box coordinates in x1y1x2y2 format
shape: [number_of_boxes, 4]
:return: total_kl_loss
"""
assert xg.shape == xe.shape and xg.shape == alpha.shape, 'The shapes of the input tensors must be the same'
smooth_l1 = F.smooth_l1_loss(xe, xg, reduction='none')
exp_min_alpha = torch.exp(-alpha)
half_alpha = 0.5 * alpha
total_kl_loss = (exp_min_alpha * smooth_l1 + half_alpha).sum()
return total_kl_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
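# Usage sketch (illustrative only, not from the original repo): with alpha == 0
# the loss reduces to a plain Smooth L1 sum, which makes the docstring formula
# easy to verify. The box count and shapes below are assumptions.
def _klloss_example():
    torch.manual_seed(0)
    criterion = KLLoss()
    xg = torch.rand(8, 4)             # ground-truth boxes (x1, y1, x2, y2)
    xe = torch.rand(8, 4)             # predicted boxes
    alpha = torch.zeros(8, 4)         # predicted log(sigma^2)
    loss = criterion(xg, xe, alpha)
    expected = F.smooth_l1_loss(xe, xg, reduction='sum')
    assert torch.allclose(loss, expected)
    return loss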
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_mul_neg_smooth_l1_loss_sum_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp4 = tl.load(in_ptr2 + r0, None)
tmp1 = -tmp0
tmp2 = tl_math.exp(tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = 1.0
tmp8 = tmp6 < tmp7
tmp9 = tmp6 * tmp6
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tmp11 * tmp7
tmp13 = tmp6 - tmp10
tmp14 = tl.where(tmp8, tmp12, tmp13)
tmp15 = tmp2 * tmp14
tmp16 = tmp0 * tmp10
tmp17 = tmp15 + tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_exp_mul_neg_smooth_l1_loss_sum_0[grid(1)](arg2_1,
arg1_1, arg0_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class KLLossNew(nn.Module):
"""
    KL loss function for bounding box regression from the CVPR 2019 paper:
Bounding Box Regression with Uncertainty for Accurate Object Detection
by Yihui He, Chenchen Zhu, Jianren Wang. Marios Savvides, Xiangyu Zhang
It is a replacement for the Smooth L1 loss often used in bounding box regression.
    The regression loss for a coordinate depends on whether |xg − xe| ≤ 1.
    If |xg − xe| ≤ 1, the loss is:
        Lreg1 ∝ e^{−α} * 1/2(xg − xe)^2 + 1/2α
    and if |xg − xe| > 1, the loss is:
        Lreg2 = e^{−α} (|xg − xe| − 1/2) + 1/2α
PyTorch implementation by Jasper Bakker (JappaB @github)
"""
def __init__(self, loc_loss_weight=1.0):
super(KLLossNew, self).__init__()
self.loc_loss_weight = loc_loss_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
JappaB/Active_Learning_Object_Detection
|
KLLoss
| false
| 8,319
|
[
"MIT"
] | 21
|
3d9ad367aa872cbf3e9d71c566042c78fe2d0e76
|
https://github.com/JappaB/Active_Learning_Object_Detection/tree/3d9ad367aa872cbf3e9d71c566042c78fe2d0e76
|
ResidualBlock
|
import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
'int', stride: 'int'):
super().__init__()
self._conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self._pad = nn.ReflectionPad2d(padding=kernel_size // 2)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self._pad(x)
x = self._conv(x)
return x
class ResidualBlock(nn.Module):
def __init__(self, channels: 'int', kernel_size: 'int'=3):
super(ResidualBlock, self).__init__()
self._conv1 = ConvLayer(in_channels=channels, out_channels=channels,
kernel_size=kernel_size, stride=1)
self._in1 = nn.InstanceNorm2d(num_features=channels, affine=True)
self._relu = nn.ReLU()
self._conv2 = ConvLayer(in_channels=channels, out_channels=channels,
kernel_size=kernel_size, stride=1)
self._in2 = nn.InstanceNorm2d(num_features=channels, affine=True)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
residual = x
out = self._relu(self._in1(self._conv1(x)))
out = self._in2(self._conv2(out))
out = out + residual
out = self._relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
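# Usage sketch (illustrative only, not from the original repo): the reflection
# padding keeps H and W unchanged, so the skip connection `out + residual` is
# well defined, and the final ReLU makes the output non-negative.
def _residual_block_example():
    torch.manual_seed(0)
    block = ResidualBlock(channels=4, kernel_size=3)
    x = torch.rand([4, 4, 4, 4])
    y = block(x)
    assert y.shape == x.shape
    assert (y >= 0).all()
    return y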
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp32 = tl.full([1, 1], 0, tl.int32)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp34 = 0.0
tmp35 = tmp33 <= tmp34
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask)
tl.store(out_ptr3 + (r3 + 16 * x0), tmp33, xmask)
tl.store(out_ptr4 + (r3 + 16 * x0), tmp35, xmask)
tl.store(out_ptr5 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf6
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf8, primals_3, buf5, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5,
buf8, buf3, buf4, buf9, 576, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16,), (1,), torch.float32)
buf11 = buf10
del buf10
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4[
grid(16)](buf11, primals_8, primals_7, primals_9, primals_1,
buf12, buf13, buf17, buf18, buf16, 16, 16, XBLOCK=1, num_warps=
2, num_stages=1)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8,
buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0),
buf18, reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ConvLayer(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
'int', stride: 'int'):
super().__init__()
self._conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self._pad = nn.ReflectionPad2d(padding=kernel_size // 2)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self._pad(x)
x = self._conv(x)
return x
class ResidualBlockNew(nn.Module):
def __init__(self, channels: 'int', kernel_size: 'int'=3):
super(ResidualBlockNew, self).__init__()
self._conv1 = ConvLayer(in_channels=channels, out_channels=channels,
kernel_size=kernel_size, stride=1)
self._in1 = nn.InstanceNorm2d(num_features=channels, affine=True)
self._relu = nn.ReLU()
self._conv2 = ConvLayer(in_channels=channels, out_channels=channels,
kernel_size=kernel_size, stride=1)
self._in2 = nn.InstanceNorm2d(num_features=channels, affine=True)
def forward(self, input_0):
primals_2 = self._conv1._conv.weight
primals_3 = self._conv1._conv.bias
primals_4 = self._in1.weight
primals_5 = self._in1.bias
primals_6 = self._conv2._conv.weight
primals_7 = self._conv2._conv.bias
primals_8 = self._in2.weight
primals_9 = self._in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
Inkln/StyleTransferWithCatalyst
|
ResidualBlock
| false
| 8,320
|
[
"Apache-2.0"
] | 11
|
c3181ecdfd32160907efc2d9d917a55925c25c11
|
https://github.com/Inkln/StyleTransferWithCatalyst/tree/c3181ecdfd32160907efc2d9d917a55925c25c11
|
FixupBasicBlock
|
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = conv3x3(inplanes, planes, stride)
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = conv3x3(planes, planes)
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
self.downsample = downsample
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.relu(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
if self.downsample is not None:
identity = self.downsample(x + self.bias1a)
identity = torch.cat((identity, torch.zeros_like(identity)), 1)
out += identity
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
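# Usage sketch (illustrative only, not from the original repo): with the
# default zero biases and unit scale, and no downsample branch, a freshly
# constructed block computes relu(conv2(relu(conv1(x))) + x).
def _fixup_block_example():
    torch.manual_seed(0)
    block = FixupBasicBlock(inplanes=4, planes=4)
    x = torch.rand([4, 4, 4, 4])
    y = block(x)
    ref = torch.relu(block.conv2(torch.relu(block.conv1(x))) + x)
    assert torch.allclose(y, ref)
    return y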
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp8 = tmp5 + tmp7
tmp9 = 0.0
tmp10 = tmp5 <= tmp9
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask)
tmp3 = tmp0 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf1,
primals_4, primals_5, buf2, buf6, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_4
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf1
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_mul_relu_threshold_backward_2[grid(256)](buf3,
primals_7, primals_8, primals_1, buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
del primals_8
return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class FixupBasicBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(FixupBasicBlockNew, self).__init__()
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = conv3x3(inplanes, planes, stride)
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = conv3x3(planes, planes)
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
self.downsample = downsample
def forward(self, input_0):
primals_2 = self.bias1a
primals_4 = self.bias1b
primals_5 = self.bias2a
primals_7 = self.scale
primals_8 = self.bias2b
primals_3 = self.conv1.weight
primals_6 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
IlanPrice/DCTpS
|
FixupBasicBlock
| false
| 8,321
|
[
"MIT"
] | 12
|
e3219ac132959f484724e0d0bd48a0cb8af3d0fa
|
https://github.com/IlanPrice/DCTpS/tree/e3219ac132959f484724e0d0bd48a0cb8af3d0fa
|
TrainablePositionalEncoding
|
import torch
import torch.nn as nn
class TrainablePositionalEncoding(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, max_position_embeddings, hidden_size, dropout=0.1):
super(TrainablePositionalEncoding, self).__init__()
self.position_embeddings = nn.Embedding(max_position_embeddings,
hidden_size)
self.LayerNorm = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input_feat):
bsz, seq_length = input_feat.shape[:2]
position_ids = torch.arange(seq_length, dtype=torch.long, device=
input_feat.device)
position_ids = position_ids.unsqueeze(0).repeat(bsz, 1)
position_embeddings = self.position_embeddings(position_ids)
embeddings = self.LayerNorm(input_feat + position_embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def add_position_emb(self, input_feat):
bsz, seq_length = input_feat.shape[:2]
position_ids = torch.arange(seq_length, dtype=torch.long, device=
input_feat.device)
position_ids = position_ids.unsqueeze(0).repeat(bsz, 1)
position_embeddings = self.position_embeddings(position_ids)
return input_feat + position_embeddings
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'max_position_embeddings': 4, 'hidden_size': 4}]
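# Usage sketch (illustrative only, not from the original repo): the intended
# input is (batch, seq_len, hidden); a learned position embedding is added,
# then LayerNorm and dropout. The sizes below are assumptions for the example.
def _trainable_pos_enc_example():
    torch.manual_seed(0)
    enc = TrainablePositionalEncoding(max_position_embeddings=16, hidden_size=4)
    feats = torch.rand(2, 10, 4)      # (batch, seq_len, hidden)
    out = enc(feats)
    assert out.shape == feats.shape
    assert enc.add_position_emb(feats).shape == feats.shape
    return out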
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_repeat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = x0
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_repeat_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=
1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_embedding_1[grid(64)](primals_2, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(64)](primals_1, buf1,
buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(256)](primals_1, buf1,
buf2, buf3, primals_3, primals_4, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
del buf3
del primals_4
return buf4, primals_1, primals_3, buf0, buf1
class TrainablePositionalEncodingNew(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, max_position_embeddings, hidden_size, dropout=0.1):
super(TrainablePositionalEncodingNew, self).__init__()
self.position_embeddings = nn.Embedding(max_position_embeddings,
hidden_size)
self.LayerNorm = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def add_position_emb(self, input_feat):
bsz, seq_length = input_feat.shape[:2]
position_ids = torch.arange(seq_length, dtype=torch.long, device=
input_feat.device)
position_ids = position_ids.unsqueeze(0).repeat(bsz, 1)
position_embeddings = self.position_embeddings(position_ids)
return input_feat + position_embeddings
def forward(self, input_0):
primals_2 = self.position_embeddings.weight
primals_3 = self.LayerNorm.weight
primals_4 = self.LayerNorm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
IsaacChanghau/ReLoCLNet
|
TrainablePositionalEncoding
| false
| 8,322
|
[
"MIT"
] | 31
|
56cb666ce516cce9acbcfce78fb4e95d81e11e54
|
https://github.com/IsaacChanghau/ReLoCLNet/tree/56cb666ce516cce9acbcfce78fb4e95d81e11e54
|
FixupResidualChain
|
import torch
import numpy as np
import torch as th
import torch.utils.data
import torch.nn as nn
from collections import OrderedDict
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, padding_mode='zero',
activation='relu'):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
self.ksize = 3
self.pad = pad
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.activation(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
crop = (self.ksize - 1) // 2 * 2
if crop > 0 and not self.pad:
identity = identity[:, :, crop:-crop, crop:-crop]
out += identity
out = self.activation2(out)
return out
class FixupResidualChain(nn.Module):
"""Linear chain of residual blocks.
Args:
n_features(int): number of input channels.
depth(int): number of residual blocks
ksize(int): size of the convolution kernel (square).
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
pad(bool): if True, zero pad the convs to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
"""
def __init__(self, n_features, depth=3, ksize=3, activation='relu',
norm_layer=None, pad=True, padding_mode='zero'):
super(FixupResidualChain, self).__init__()
assert isinstance(n_features, int
) and n_features > 0, 'Number of feature channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
self.depth = depth
layers = OrderedDict()
for lvl in range(depth):
blockname = 'resblock{}'.format(lvl)
layers[blockname] = FixupBasicBlock(n_features, ksize=ksize,
activation=activation, pad=pad, padding_mode=padding_mode)
self.net = nn.Sequential(layers)
self._reset_weights()
def _reset_weights(self):
for m in self.net.modules():
if isinstance(m, FixupBasicBlock):
nn.init.normal_(m.conv1.conv.weight, mean=0, std=np.sqrt(2 /
(m.conv1.conv.weight.shape[0] * np.prod(m.conv1.conv.
weight.shape[2:]))) * self.depth ** -0.5)
nn.init.constant_(m.conv2.conv.weight, 0)
def forward(self, x):
x = self.net(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4}]
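# Usage sketch (illustrative only, not from the original repo): _reset_weights
# zero-initialises conv2 of every block and rescales conv1 by depth ** -0.5,
# so before training each residual branch contributes nothing and a
# non-negative input passes through the chain unchanged.
def _fixup_chain_example():
    torch.manual_seed(0)
    chain = FixupResidualChain(n_features=4, depth=3, ksize=3)
    x = torch.rand([4, 4, 4, 4])
    y = chain(x)
    assert y.shape == x.shape
    assert torch.allclose(y, x)       # identity at initialisation for x >= 0
    for blk in chain.net:
        assert (blk.conv2.conv.weight == 0).all()
    return y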
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch as th
import torch.utils.data
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp10 = tmp7 + tmp9
tmp11 = 0.0
tmp12 = tmp7 <= tmp11
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp13 = tl.load(in_ptr4 + 0)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp15 = tmp12 + tmp14
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp10 = tl.load(in_ptr4 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr5 + 0)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr6 + x3, xmask)
tmp24 = tl.load(in_ptr7 + 0)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp12 = tmp9 * tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp20 = tmp8 + tmp19
tmp21 = triton_helpers.maximum(tmp18, tmp20)
tmp22 = 0.0
tmp23 = tmp19 <= tmp22
tmp26 = tmp21 + tmp25
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp21, xmask)
tl.store(out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr2 + x3, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_threshold_backward_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tmp15 = tmp9 <= tmp13
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27, primals_28
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (1,), (1,))
assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (1,), (1,))
assert_size_stride(primals_15, (1,), (1,))
assert_size_stride(primals_16, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (1,), (1,))
assert_size_stride(primals_19, (1,), (1,))
assert_size_stride(primals_20, (1,), (1,))
assert_size_stride(primals_21, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (1,), (1,))
assert_size_stride(primals_24, (1,), (1,))
assert_size_stride(primals_25, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_26, (4,), (1,))
assert_size_stride(primals_27, (1,), (1,))
assert_size_stride(primals_28, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf1, primals_4, primals_5, primals_6, buf2, buf22, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_4
del primals_5
del primals_6
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = buf1
del buf1
triton_poi_fused_add_convolution_mul_relu_2[grid(256)](buf4,
primals_8, primals_9, primals_10, primals_1, primals_11, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
del primals_8
buf6 = extern_kernels.convolution(buf5, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf6, primals_13, primals_14, primals_15, buf7, buf20, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
del primals_14
del primals_15
buf8 = extern_kernels.convolution(buf7, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
buf10 = buf6
del buf6
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_relu_threshold_backward_3[grid
(256)](buf9, primals_17, primals_18, primals_19, buf4,
primals_9, primals_10, primals_1, primals_20, buf10, buf21,
buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_10
del primals_17
del primals_19
del primals_20
buf12 = extern_kernels.convolution(buf11, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf12, primals_22, primals_23, primals_24, buf13, buf18, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_22
del primals_23
del primals_24
buf14 = extern_kernels.convolution(buf13, primals_25, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1))
buf15 = buf14
del buf14
buf16 = buf12
del buf12
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_mul_relu_threshold_backward_4[grid
(256)](buf15, primals_26, primals_27, primals_28, buf10, buf16,
buf17, buf19, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf10
del primals_26
del primals_28
return (buf16, primals_3, primals_7, primals_9, primals_12, primals_16,
primals_18, primals_21, primals_25, primals_27, buf0, buf2, buf4,
buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf18, buf19, buf20,
buf21, buf22)
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, padding_mode='zero',
activation='relu'):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
self.ksize = 3
self.pad = pad
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.activation(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
crop = (self.ksize - 1) // 2 * 2
if crop > 0 and not self.pad:
identity = identity[:, :, crop:-crop, crop:-crop]
out += identity
out = self.activation2(out)
return out
class FixupResidualChainNew(nn.Module):
"""Linear chain of residual blocks.
Args:
n_features(int): number of input channels.
depth(int): number of residual blocks
ksize(int): size of the convolution kernel (square).
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
pad(bool): if True, zero pad the convs to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
"""
def __init__(self, n_features, depth=3, ksize=3, activation='relu',
norm_layer=None, pad=True, padding_mode='zero'):
super(FixupResidualChainNew, self).__init__()
assert isinstance(n_features, int
) and n_features > 0, 'Number of feature channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
self.depth = depth
layers = OrderedDict()
for lvl in range(depth):
blockname = 'resblock{}'.format(lvl)
layers[blockname] = FixupBasicBlock(n_features, ksize=ksize,
activation=activation, pad=pad, padding_mode=padding_mode)
self.net = nn.Sequential(layers)
self._reset_weights()
def _reset_weights(self):
for m in self.net.modules():
if isinstance(m, FixupBasicBlock):
nn.init.normal_(m.conv1.conv.weight, mean=0, std=np.sqrt(2 /
(m.conv1.conv.weight.shape[0] * np.prod(m.conv1.conv.
weight.shape[2:]))) * self.depth ** -0.5)
nn.init.constant_(m.conv2.conv.weight, 0)
def forward(self, input_0):
primals_2 = self.net.resblock0.bias1a
primals_5 = self.net.resblock0.bias1b
primals_6 = self.net.resblock0.bias2a
primals_9 = self.net.resblock0.scale
primals_10 = self.net.resblock0.bias2b
primals_3 = self.net.resblock0.conv1.conv.weight
primals_4 = self.net.resblock0.conv1.conv.bias
primals_7 = self.net.resblock0.conv2.conv.weight
primals_8 = self.net.resblock0.conv2.conv.bias
primals_11 = self.net.resblock1.bias1a
primals_14 = self.net.resblock1.bias1b
primals_15 = self.net.resblock1.bias2a
primals_18 = self.net.resblock1.scale
primals_19 = self.net.resblock1.bias2b
primals_12 = self.net.resblock1.conv1.conv.weight
primals_13 = self.net.resblock1.conv1.conv.bias
primals_16 = self.net.resblock1.conv2.conv.weight
primals_17 = self.net.resblock1.conv2.conv.bias
primals_20 = self.net.resblock2.bias1a
primals_23 = self.net.resblock2.bias1b
primals_24 = self.net.resblock2.bias2a
primals_27 = self.net.resblock2.scale
primals_28 = self.net.resblock2.bias2b
primals_21 = self.net.resblock2.conv1.conv.weight
primals_22 = self.net.resblock2.conv1.conv.bias
primals_25 = self.net.resblock2.conv2.conv.weight
primals_26 = self.net.resblock2.conv2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28])
return output[0]
|
IlyaBizyaev/ttools
|
FixupResidualChain
| false
| 8,323
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
WeightedSmoothL1Loss
|
import torch
import numpy as np
import torch.nn as nn
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: 'float'=1.0 / 9.0, code_weights: 'list'=None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights)
else:
self.code_weights = None
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-05:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'=None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target)
diff = input - target
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1
] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
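# Illustrative usage sketch (editor's addition, not part of the original repo code).
# With the default beta = 1/9: a residual of 0.05 (< beta) uses the quadratic branch,
# 0.5 * 0.05**2 / (1/9) = 0.01125, while a residual of 0.5 uses the linear branch,
# 0.5 - 0.5 * (1/9) ~= 0.4444. The shapes below are assumptions for demonstration.
def _example_weighted_smooth_l1():
    loss_fn = WeightedSmoothL1Loss(beta=1.0 / 9.0)
    preds = torch.rand(2, 8, 7)   # (B, #anchors, #codes)
    target = torch.rand(2, 8, 7)
    weights = torch.ones(2, 8)    # per-anchor weights
    loss = loss_fn(preds, target, weights)  # element-wise loss, no reduction
    return loss.mean()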
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_div_isnan_lt_mul_pow_sub_where_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = libdevice.isnan(tmp1).to(tl.int1)
tmp3 = tl.where(tmp2, tmp0, tmp1)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.abs(tmp4)
tmp6 = 0.1111111111111111
tmp7 = tmp5 < tmp6
tmp8 = tmp5 * tmp5
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = 9.0
tmp12 = tmp10 * tmp11
tmp13 = 0.05555555555555555
tmp14 = tmp5 - tmp13
tmp15 = tl.where(tmp7, tmp12, tmp14)
tl.store(out_ptr0 + x0, tmp15, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_div_isnan_lt_mul_pow_sub_where_0[grid(256)](arg1_1
, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class WeightedSmoothL1LossNew(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: 'float'=1.0 / 9.0, code_weights: 'list'=None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1LossNew, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights)
else:
self.code_weights = None
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-05:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Jasonkks/mlcnet
|
WeightedSmoothL1Loss
| false
| 8,324
|
[
"Apache-2.0"
] | 18
|
8f89c860c709733c8baa663607004fc48d76291d
|
https://github.com/Jasonkks/mlcnet/tree/8f89c860c709733c8baa663607004fc48d76291d
|
h_swish
|
import torch
import torch.nn as nn
class h_sigmoid(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, x):
return self.relu(x + 3) / 6
class h_swish(nn.Module):
def __init__(self, inplace=True):
super(h_swish, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, x):
return x * self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
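# Illustrative check (editor's addition, not part of the original repo code):
# hard-swish is x * relu6(x + 3) / 6, so h_swish(-4) == 0, h_swish(0) == 0 and
# h_swish(3) == 3 (because relu6(6) / 6 == 1).
def _example_h_swish():
    act = h_swish(inplace=False)
    x = torch.tensor([-4.0, 0.0, 3.0])
    return act(x)  # approximately [0., 0., 3.]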
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_sigmoid(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, x):
return self.relu(x + 3) / 6
class h_swishNew(nn.Module):
def __init__(self, inplace=True):
super(h_swishNew, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JaminFong/dali-pytorch
|
h_swish
| false
| 8,325
|
[
"Apache-2.0"
] | 41
|
7bd5d2380d210a32d24c7309da69c8d2c5db8759
|
https://github.com/JaminFong/dali-pytorch/tree/7bd5d2380d210a32d24c7309da69c8d2c5db8759
|
injective_pad
|
import torch
import torch.nn as nn
class injective_pad(nn.Module):
def __init__(self, pad_size):
super(injective_pad, self).__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def forward(self, x):
x = x.permute(0, 2, 1, 3)
x = self.pad(x)
return x.permute(0, 2, 1, 3)
def inverse(self, x):
l = len(x[1])
return x[:, :l - self.pad_size, :, :]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pad_size': 4}]
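# Illustrative usage sketch (editor's addition, not part of the original repo code):
# the permutes move the channel axis onto the padded dimension, so a (N, C, H, W)
# input becomes (N, C + pad_size, H, W) and inverse() slices the extra channels off.
# The shapes below are assumptions chosen only for demonstration.
def _example_injective_pad():
    pad = injective_pad(pad_size=4)
    x = torch.rand(2, 3, 8, 8)
    y = pad(x)               # (2, 7, 8, 8)
    x_back = pad.inverse(y)  # (2, 3, 8, 8)
    return torch.allclose(x, x_back)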
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32 % 4
x3 = xindex // 128
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2 &
xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(512)](arg0_1, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 4, 32, 1), 0),
class injective_padNew(nn.Module):
def __init__(self, pad_size):
super(injective_padNew, self).__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def inverse(self, x):
l = len(x[1])
return x[:, :l - self.pad_size, :, :]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JessieYuW/CrevNet-Traffic4cast
|
injective_pad
| false
| 8,326
|
[
"Apache-2.0"
] | 13
|
810b2a951de1f99a07bf8cfcbd93e1fc016cce48
|
https://github.com/JessieYuW/CrevNet-Traffic4cast/tree/810b2a951de1f99a07bf8cfcbd93e1fc016cce48
|
GridMixupLoss
|
import math
import random
import torch
import numpy as np
import typing as t
from torch import nn
class GridMixupLoss(nn.Module):
""" Implementation of GridMixup loss
    :param alpha: Percentage of the first image in the crop. Can be float or Tuple[float, float]
        - if float: the lambda parameter is drawn from the beta distribution np.random.beta(alpha, alpha)
        - if Tuple[float, float]: the lambda parameter is drawn from the uniform
          distribution np.random.uniform(alpha[0], alpha[1])
    :param n_holes_x: Number of holes along the X axis
    :param hole_aspect_ratio: Hole aspect ratio
    :param crop_area_ratio: Percentage of the crop area
    :param crop_aspect_ratio: Crop aspect ratio
"""
def __init__(self, alpha: 't.Union[float, t.Tuple[float, float]]'=(0.1,
0.9), n_holes_x: 't.Union[int, t.Tuple[int, int]]'=20,
hole_aspect_ratio: 't.Union[float, t.Tuple[float, float]]'=1.0,
crop_area_ratio: 't.Union[float, t.Tuple[float, float]]'=1.0,
crop_aspect_ratio: 't.Union[float, t.Tuple[float, float]]'=1.0):
super().__init__()
self.alpha = alpha
self.n_holes_x = n_holes_x
self.hole_aspect_ratio = hole_aspect_ratio
self.crop_area_ratio = crop_area_ratio
self.crop_aspect_ratio = crop_aspect_ratio
if isinstance(self.n_holes_x, int):
self.n_holes_x = self.n_holes_x, self.n_holes_x
if isinstance(self.hole_aspect_ratio, float):
self.hole_aspect_ratio = (self.hole_aspect_ratio, self.
hole_aspect_ratio)
if isinstance(self.crop_area_ratio, float):
self.crop_area_ratio = self.crop_area_ratio, self.crop_area_ratio
if isinstance(self.crop_aspect_ratio, float):
self.crop_aspect_ratio = (self.crop_aspect_ratio, self.
crop_aspect_ratio)
self.loss = nn.CrossEntropyLoss()
def __str__(self):
return 'gridmixup'
@staticmethod
def _get_random_crop(height: 'int', width: 'int', crop_area_ratio:
'float', crop_aspect_ratio: 'float') ->t.Tuple:
crop_area = int(height * width * crop_area_ratio)
crop_width = int(np.sqrt(crop_area / crop_aspect_ratio))
crop_height = int(crop_width * crop_aspect_ratio)
cx = np.random.random()
cy = np.random.random()
y1 = int((height - crop_height) * cy)
y2 = y1 + crop_height
x1 = int((width - crop_width) * cx)
x2 = x1 + crop_width
return x1, y1, x2, y2
def _get_gridmask(self, image_shape: 't.Tuple[int, int]',
crop_area_ratio: 'float', crop_aspect_ratio: 'float', lam: 'float',
nx: 'int', ar: 'float') ->np.ndarray:
""" Method make grid mask
:param image_shape: Shape of the images
:param lam: Lambda parameter
:param crop_area_ratio: Ratio of the crop area
:param crop_aspect_ratio: Aspect ratio of the crop
        :param nx: Number of holes along the width
:param ar: Aspect ratio of the hole
:return: Binary mask, where holes == 1, background == 0
"""
img_height, img_width = image_shape
xc1, yc1, xc2, yc2 = self._get_random_crop(height=img_height, width
=img_width, crop_area_ratio=crop_area_ratio, crop_aspect_ratio=
crop_aspect_ratio)
height = yc2 - yc1
width = xc2 - xc1
if not 1 <= nx <= width // 2:
raise ValueError(
                f'The nx must be between 1 and {width // 2}.\nGiven: {nx}')
patch_width = math.ceil(width / nx)
patch_height = int(patch_width * ar)
ny = math.ceil(height / patch_height)
ratio = np.sqrt(1 - lam)
hole_width = int(patch_width * ratio)
hole_height = int(patch_height * ratio)
hole_width = min(max(hole_width, 1), patch_width - 1)
hole_height = min(max(hole_height, 1), patch_height - 1)
holes = []
for i in range(nx + 1):
for j in range(ny + 1):
x1 = min(patch_width * i, width)
y1 = min(patch_height * j, height)
x2 = min(x1 + hole_width, width)
y2 = min(y1 + hole_height, height)
holes.append((x1, y1, x2, y2))
mask = np.zeros(shape=image_shape, dtype=np.uint8)
for x1, y1, x2, y2 in holes:
mask[yc1 + y1:yc1 + y2, xc1 + x1:xc1 + x2] = 1
return mask
def get_sample(self, images: 'torch.Tensor', targets: 'torch.Tensor'
) ->t.Tuple[torch.Tensor, torch.Tensor]:
""" Method returns augmented images and targets
:param images: Batch of non-augmented images
:param targets: Batch of non-augmented targets
:return: Augmented images and targets
"""
indices = torch.randperm(images.size(0))
shuffled_targets = targets[indices]
height, width = images.shape[2:]
if isinstance(self.alpha, float):
lam = np.random.beta(self.alpha, self.alpha)
else:
lam = np.random.uniform(self.alpha[0], self.alpha[1])
nx = random.randint(self.n_holes_x[0], self.n_holes_x[1])
ar = np.random.uniform(self.hole_aspect_ratio[0], self.
hole_aspect_ratio[1])
crop_area_ratio = np.random.uniform(self.crop_area_ratio[0], self.
crop_area_ratio[1])
crop_aspect_ratio = np.random.uniform(self.crop_aspect_ratio[0],
self.crop_aspect_ratio[1])
mask = self._get_gridmask(image_shape=(height, width),
crop_area_ratio=crop_area_ratio, crop_aspect_ratio=
crop_aspect_ratio, lam=lam, nx=nx, ar=ar)
lam = 1 - mask.sum() / (images.size()[-1] * images.size()[-2])
mask = torch.from_numpy(mask)
images = images * (1 - mask) + images[indices, ...] * mask
lam_list = torch.from_numpy(np.ones(shape=targets.shape) * lam)
out_targets = torch.cat([targets, shuffled_targets, lam_list], dim=1
).transpose(0, 1)
return images, out_targets
def forward(self, preds: 'torch.Tensor', trues: 'torch.Tensor'
) ->torch.Tensor:
lam = trues[-1][0].float()
trues1, trues2 = trues[0].long(), trues[1].long()
loss = self.loss(preds, trues1) * lam + self.loss(preds, trues2) * (
1 - lam)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
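# Illustrative usage sketch (editor's addition, not part of the original repo code).
# In training one would call get_sample() to mix a batch and stack
# [targets, shuffled_targets, lam] into one tensor; forward() then blends the two
# cross-entropy terms by lam. The class count and shapes below are assumptions, and
# trues mimics the (3, B) layout produced by get_sample for single-column targets.
def _example_gridmixup_forward():
    criterion = GridMixupLoss(alpha=(0.4, 0.7), n_holes_x=4)
    logits = torch.randn(8, 10)                       # stand-in for model outputs
    targets_a = torch.randint(0, 10, (1, 8)).float()  # original labels
    targets_b = torch.randint(0, 10, (1, 8)).float()  # shuffled labels
    lam = torch.full((1, 8), 0.7)                     # mixing coefficient
    trues = torch.cat([targets_a, targets_b, lam], dim=0)
    return criterion(logits, trues)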
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import random
import numpy as np
import typing as t
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__to_copy_nll_loss2d_forward_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + r2, None)
tmp12 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([1, 1], -100, tl.int64)
tmp3 = tmp1 != tmp2
tmp4 = tl.full([1, 1], 0, tl.int64)
tmp5 = tl.where(tmp3, tmp1, tmp4)
tmp6 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tmp9) & (tmp9 < 4),
'index out of bounds: 0 <= tmp9 < 4')
tmp11 = tl.load(in_ptr1 + (r0 + 16 * tmp9 + 64 * r1), None)
tmp13 = tl_math.exp(tmp12)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp11 - tmp23
tmp25 = -tmp24
tmp26 = 0.0
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = tmp3.to(tl.int64)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None)
@triton.jit
def triton_per_fused__to_copy_nll_loss2d_forward_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (64 + r2), None)
tmp12 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([1, 1], -100, tl.int64)
tmp3 = tmp1 != tmp2
tmp4 = tl.full([1, 1], 0, tl.int64)
tmp5 = tl.where(tmp3, tmp1, tmp4)
tmp6 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tmp9) & (tmp9 < 4),
'index out of bounds: 0 <= tmp9 < 4')
tmp11 = tl.load(in_ptr1 + (r0 + 16 * tmp9 + 64 * r1), None)
tmp13 = tl_math.exp(tmp12)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp11 - tmp23
tmp25 = -tmp24
tmp26 = 0.0
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = tmp3.to(tl.int64)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None)
@triton.jit
def triton_poi_fused_add_mul_nll_loss2d_forward_rsub_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (192 + x0), xmask)
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr4 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp1 / tmp4
tmp7 = tmp5 * tmp6
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp9 / tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp6
tmp16 = tmp13 * tmp15
tmp17 = tmp7 + tmp16
tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((), (), torch.int64)
triton_per_fused__to_copy_nll_loss2d_forward_1[grid(1)](arg0_1,
buf0, buf1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = empty_strided_cuda((), (), torch.int64)
triton_per_fused__to_copy_nll_loss2d_forward_2[grid(1)](arg0_1,
buf3, buf4, buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf3
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_nll_loss2d_forward_rsub_3[grid(16)](buf1,
buf2, arg0_1, buf4, buf5, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del arg0_1
del buf1
del buf2
del buf4
del buf5
return buf6,
class GridMixupLossNew(nn.Module):
""" Implementation of GridMixup loss
    :param alpha: Percentage of the first image in the crop. Can be float or Tuple[float, float]
        - if float: the lambda parameter is drawn from the beta distribution np.random.beta(alpha, alpha)
        - if Tuple[float, float]: the lambda parameter is drawn from the uniform
          distribution np.random.uniform(alpha[0], alpha[1])
    :param n_holes_x: Number of holes along the X axis
    :param hole_aspect_ratio: Hole aspect ratio
    :param crop_area_ratio: Percentage of the crop area
    :param crop_aspect_ratio: Crop aspect ratio
"""
def __init__(self, alpha: 't.Union[float, t.Tuple[float, float]]'=(0.1,
0.9), n_holes_x: 't.Union[int, t.Tuple[int, int]]'=20,
hole_aspect_ratio: 't.Union[float, t.Tuple[float, float]]'=1.0,
crop_area_ratio: 't.Union[float, t.Tuple[float, float]]'=1.0,
crop_aspect_ratio: 't.Union[float, t.Tuple[float, float]]'=1.0):
super().__init__()
self.alpha = alpha
self.n_holes_x = n_holes_x
self.hole_aspect_ratio = hole_aspect_ratio
self.crop_area_ratio = crop_area_ratio
self.crop_aspect_ratio = crop_aspect_ratio
if isinstance(self.n_holes_x, int):
self.n_holes_x = self.n_holes_x, self.n_holes_x
if isinstance(self.hole_aspect_ratio, float):
self.hole_aspect_ratio = (self.hole_aspect_ratio, self.
hole_aspect_ratio)
if isinstance(self.crop_area_ratio, float):
self.crop_area_ratio = self.crop_area_ratio, self.crop_area_ratio
if isinstance(self.crop_aspect_ratio, float):
self.crop_aspect_ratio = (self.crop_aspect_ratio, self.
crop_aspect_ratio)
self.loss = nn.CrossEntropyLoss()
def __str__(self):
return 'gridmixup'
@staticmethod
def _get_random_crop(height: 'int', width: 'int', crop_area_ratio:
'float', crop_aspect_ratio: 'float') ->t.Tuple:
crop_area = int(height * width * crop_area_ratio)
crop_width = int(np.sqrt(crop_area / crop_aspect_ratio))
crop_height = int(crop_width * crop_aspect_ratio)
cx = np.random.random()
cy = np.random.random()
y1 = int((height - crop_height) * cy)
y2 = y1 + crop_height
x1 = int((width - crop_width) * cx)
x2 = x1 + crop_width
return x1, y1, x2, y2
def _get_gridmask(self, image_shape: 't.Tuple[int, int]',
crop_area_ratio: 'float', crop_aspect_ratio: 'float', lam: 'float',
nx: 'int', ar: 'float') ->np.ndarray:
""" Method make grid mask
:param image_shape: Shape of the images
:param lam: Lambda parameter
:param crop_area_ratio: Ratio of the crop area
:param crop_aspect_ratio: Aspect ratio of the crop
        :param nx: Number of holes along the width
:param ar: Aspect ratio of the hole
:return: Binary mask, where holes == 1, background == 0
"""
img_height, img_width = image_shape
xc1, yc1, xc2, yc2 = self._get_random_crop(height=img_height, width
=img_width, crop_area_ratio=crop_area_ratio, crop_aspect_ratio=
crop_aspect_ratio)
height = yc2 - yc1
width = xc2 - xc1
if not 1 <= nx <= width // 2:
raise ValueError(
                f'The nx must be between 1 and {width // 2}.\nGiven: {nx}')
patch_width = math.ceil(width / nx)
patch_height = int(patch_width * ar)
ny = math.ceil(height / patch_height)
ratio = np.sqrt(1 - lam)
hole_width = int(patch_width * ratio)
hole_height = int(patch_height * ratio)
hole_width = min(max(hole_width, 1), patch_width - 1)
hole_height = min(max(hole_height, 1), patch_height - 1)
holes = []
for i in range(nx + 1):
for j in range(ny + 1):
x1 = min(patch_width * i, width)
y1 = min(patch_height * j, height)
x2 = min(x1 + hole_width, width)
y2 = min(y1 + hole_height, height)
holes.append((x1, y1, x2, y2))
mask = np.zeros(shape=image_shape, dtype=np.uint8)
for x1, y1, x2, y2 in holes:
mask[yc1 + y1:yc1 + y2, xc1 + x1:xc1 + x2] = 1
return mask
def get_sample(self, images: 'torch.Tensor', targets: 'torch.Tensor'
) ->t.Tuple[torch.Tensor, torch.Tensor]:
""" Method returns augmented images and targets
:param images: Batch of non-augmented images
:param targets: Batch of non-augmented targets
:return: Augmented images and targets
"""
indices = torch.randperm(images.size(0))
shuffled_targets = targets[indices]
height, width = images.shape[2:]
if isinstance(self.alpha, float):
lam = np.random.beta(self.alpha, self.alpha)
else:
lam = np.random.uniform(self.alpha[0], self.alpha[1])
nx = random.randint(self.n_holes_x[0], self.n_holes_x[1])
ar = np.random.uniform(self.hole_aspect_ratio[0], self.
hole_aspect_ratio[1])
crop_area_ratio = np.random.uniform(self.crop_area_ratio[0], self.
crop_area_ratio[1])
crop_aspect_ratio = np.random.uniform(self.crop_aspect_ratio[0],
self.crop_aspect_ratio[1])
mask = self._get_gridmask(image_shape=(height, width),
crop_area_ratio=crop_area_ratio, crop_aspect_ratio=
crop_aspect_ratio, lam=lam, nx=nx, ar=ar)
lam = 1 - mask.sum() / (images.size()[-1] * images.size()[-2])
mask = torch.from_numpy(mask)
images = images * (1 - mask) + images[indices, ...] * mask
lam_list = torch.from_numpy(np.ones(shape=targets.shape) * lam)
out_targets = torch.cat([targets, shuffled_targets, lam_list], dim=1
).transpose(0, 1)
return images, out_targets
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
IlyaDobrynin/GridMixup
|
GridMixupLoss
| false
| 8,327
|
[
"MIT"
] | 42
|
11b741f234832c9a15b4e650e1e4fad0e79dc63b
|
https://github.com/IlyaDobrynin/GridMixup/tree/11b741f234832c9a15b4e650e1e4fad0e79dc63b
|
DiagonalQuantizer
|
import torch
import numpy as np
import torch.cuda
import torch.fft
def diagonal_quantize_function(x, bit, phase_noise_std=0, random_state=None,
gradient_clip=False):
class DiagonalQuantizeFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
S_scale = x.abs().max(dim=-1, keepdim=True)[0]
x = (x / S_scale).acos()
ratio = np.pi / (2 ** bit - 1)
x.div_(ratio).round_().mul_(ratio)
if phase_noise_std > 1e-05:
noise = gen_gaussian_noise(x, noise_mean=0, noise_std=
phase_noise_std, trunc_range=[-2 * phase_noise_std, 2 *
phase_noise_std], random_state=random_state)
x.add_(noise)
x.cos_().mul_(S_scale)
return x
@staticmethod
def backward(ctx, grad_output):
if gradient_clip:
grad_input = grad_output.clamp(-1, 1)
else:
grad_input = grad_output.clone()
return grad_input
return DiagonalQuantizeFunction.apply(x)
class DiagonalQuantizer(torch.nn.Module):
def __init__(self, bit, phase_noise_std=0.0, random_state=None, device=
torch.device('cuda')):
"""2021/02/18: New phase quantizer for Sigma matrix in MZI-ONN. Gaussian phase noise is supported. All singular values are normalized by a TIA gain (S_scale), the normalized singular values will be achieved by cos(phi), phi will have [0, pi] uniform quantization.
We do not consider real MZI implementation, thus voltage quantization and gamma noises are not supported.
Args:
bit (int): bitwidth for phase quantization.
phase_noise_std (float, optional): Std dev for Gaussian phase noises. Defaults to 0.
random_state (int, optional): random_state to control random noise injection. Defaults to None.
device (torch.Device, optional): torch.Device. Defaults to torch.device("cuda").
"""
super().__init__()
self.bit = bit
self.phase_noise_std = phase_noise_std
self.random_state = random_state
self.device = device
def set_phase_noise_std(self, phase_noise_std=0, random_state=None):
self.phase_noise_std = phase_noise_std
self.random_state = random_state
def forward(self, x):
x = diagonal_quantize_function(x, self.bit, self.phase_noise_std,
self.random_state, gradient_clip=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'bit': 4}]
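# Illustrative usage sketch (editor's addition, not part of the original repo code):
# quantize a batch of singular-value vectors to 4 bits. Each row is rescaled by its
# largest |value| (the TIA gain S_scale), mapped through acos, snapped to a uniform
# phase grid with step pi / (2**bit - 1) on [0, pi], and mapped back through cos.
# Running on CPU here is an assumption made purely for the example.
def _example_diagonal_quantizer():
    quantizer = DiagonalQuantizer(bit=4, phase_noise_std=0.0, device=torch.device('cpu'))
    s = torch.rand(4, 8)   # nonnegative "singular values"
    s_q = quantizer(s)     # same shape, quantized in the phase domain
    return (s - s_q).abs().max()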
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.cuda
import torch.fft
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_acos_cos_div_max_mul_round_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.abs(tmp1)
tmp4 = tl_math.abs(tmp3)
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tl_math.abs(tmp6)
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tl_math.abs(tmp9)
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp0 / tmp11
tmp13 = libdevice.acos(tmp12)
tmp14 = 4.7746482927568605
tmp15 = tmp13 * tmp14
tmp16 = libdevice.nearbyint(tmp15)
tmp17 = 0.20943951023931953
tmp18 = tmp16 * tmp17
tmp19 = tl_math.cos(tmp18)
tmp20 = tmp19 * tmp11
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_acos_cos_div_max_mul_round_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def diagonal_quantize_function(x, bit, phase_noise_std=0, random_state=None,
gradient_clip=False):
class DiagonalQuantizeFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
S_scale = x.abs().max(dim=-1, keepdim=True)[0]
x = (x / S_scale).acos()
ratio = np.pi / (2 ** bit - 1)
x.div_(ratio).round_().mul_(ratio)
if phase_noise_std > 1e-05:
noise = gen_gaussian_noise(x, noise_mean=0, noise_std=
phase_noise_std, trunc_range=[-2 * phase_noise_std, 2 *
phase_noise_std], random_state=random_state)
x.add_(noise)
x.cos_().mul_(S_scale)
return x
@staticmethod
def backward(ctx, grad_output):
if gradient_clip:
grad_input = grad_output.clamp(-1, 1)
else:
grad_input = grad_output.clone()
return grad_input
return DiagonalQuantizeFunction.apply(x)
class DiagonalQuantizerNew(torch.nn.Module):
def __init__(self, bit, phase_noise_std=0.0, random_state=None, device=
torch.device('cuda')):
"""2021/02/18: New phase quantizer for Sigma matrix in MZI-ONN. Gaussian phase noise is supported. All singular values are normalized by a TIA gain (S_scale), the normalized singular values will be achieved by cos(phi), phi will have [0, pi] uniform quantization.
We do not consider real MZI implementation, thus voltage quantization and gamma noises are not supported.
Args:
bit (int): bitwidth for phase quantization.
phase_noise_std (float, optional): Std dev for Gaussian phase noises. Defaults to 0.
random_state (int, optional): random_state to control random noise injection. Defaults to None.
device (torch.Device, optional): torch.Device. Defaults to torch.device("cuda").
"""
super().__init__()
self.bit = bit
self.phase_noise_std = phase_noise_std
self.random_state = random_state
self.device = device
def set_phase_noise_std(self, phase_noise_std=0, random_state=None):
self.phase_noise_std = phase_noise_std
self.random_state = random_state
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JeremieMelo/pytorch-onn
|
DiagonalQuantizer
| false
| 8,328
|
[
"MIT"
] | 16
|
670996112277a6c19c7da400afbe0a4ce45ad5de
|
https://github.com/JeremieMelo/pytorch-onn/tree/670996112277a6c19c7da400afbe0a4ce45ad5de
|
AffineConstantFlow
|
import torch
from torch import nn
class AffineConstantFlow(nn.Module):
"""
Scales + Shifts the flow by (learned) constants per dimension.
    In the NICE paper there is a Scaling layer, which is a special case of this where t is None
"""
def __init__(self, dim, scale=True, shift=True):
super().__init__()
self.s = nn.Parameter(torch.randn(1, dim, requires_grad=True)
) if scale else None
self.t = nn.Parameter(torch.randn(1, dim, requires_grad=True)
) if shift else None
def forward(self, x):
s = self.s if self.s is not None else x.new_zeros(x.size())
t = self.t if self.t is not None else x.new_zeros(x.size())
z = x * torch.exp(s) + t
log_det = torch.sum(s, dim=1)
return z, log_det
def backward(self, z):
s = self.s if self.s is not None else z.new_zeros(z.size())
t = self.t if self.t is not None else z.new_zeros(z.size())
x = (z - t) * torch.exp(-s)
log_det = torch.sum(-s, dim=1)
return x, log_det
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
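# Illustrative usage sketch (editor's addition, not part of the original repo code):
# forward() applies z = x * exp(s) + t and backward() inverts it, so composing the
# two recovers the input and the log-determinants cancel. Shapes are assumptions.
def _example_affine_constant_flow():
    flow = AffineConstantFlow(dim=4)
    x = torch.rand(8, 4)
    z, log_det = flow(x)                     # log_det = sum of s over dim=1
    x_back, inv_log_det = flow.backward(z)
    return torch.allclose(x, x_back, atol=1e-06), (log_det + inv_log_det).abs().max()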
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_mul_0[grid(256)](primals_3, primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((1,), (1,), torch.float32)
triton_per_fused_sum_1[grid(1)](primals_1, buf1, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
return buf0, buf1, primals_1, primals_3
class AffineConstantFlowNew(nn.Module):
"""
Scales + Shifts the flow by (learned) constants per dimension.
    In the NICE paper there is a Scaling layer, which is a special case of this where t is None
"""
def __init__(self, dim, scale=True, shift=True):
super().__init__()
self.s = nn.Parameter(torch.randn(1, dim, requires_grad=True)
) if scale else None
self.t = nn.Parameter(torch.randn(1, dim, requires_grad=True)
) if shift else None
def backward(self, z):
s = self.s if self.s is not None else z.new_zeros(z.size())
t = self.t if self.t is not None else z.new_zeros(z.size())
x = (z - t) * torch.exp(-s)
log_det = torch.sum(-s, dim=1)
return x, log_det
def forward(self, input_0):
primals_1 = self.s
primals_2 = self.t
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
JannerM/gamma-models
|
AffineConstantFlow
| false
| 8,329
|
[
"MIT"
] | 32
|
4b40d828bf228385c3081d359cdc3494d70de4a1
|
https://github.com/JannerM/gamma-models/tree/4b40d828bf228385c3081d359cdc3494d70de4a1
|
SqueezeExcite
|
import torch
from torchvision.transforms import functional as F
import torch.nn as nn
import torch.nn.functional as F
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chs': 4}]
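# Illustrative usage sketch (editor's addition, not part of the original repo code):
# squeeze-and-excitation gates each channel: global-average-pool to (N, C, 1, 1),
# reduce to _make_divisible(C * se_ratio, 4) channels, expand back to C, apply the
# hard-sigmoid gate, and multiply the input by the per-channel gate. Shapes are
# assumptions chosen only for demonstration.
def _example_squeeze_excite():
    se = SqueezeExcite(in_chs=32, se_ratio=0.25)  # reduced_chs == 8
    x = torch.rand(2, 32, 14, 14)
    y = se(x)
    assert y.shape == x.shape
    return y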
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision.transforms import functional as F
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)](
primals_1, buf4, primals_5, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class SqueezeExciteNew(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExciteNew, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, input_0):
primals_2 = self.conv_reduce.weight
primals_3 = self.conv_reduce.bias
primals_4 = self.conv_expand.weight
primals_5 = self.conv_expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
JaminFong/dali-pytorch
|
SqueezeExcite
| false
| 8,330
|
[
"Apache-2.0"
] | 41
|
7bd5d2380d210a32d24c7309da69c8d2c5db8759
|
https://github.com/JaminFong/dali-pytorch/tree/7bd5d2380d210a32d24c7309da69c8d2c5db8759
|
Upsample
|
import torch
import torch.nn as nn
class Upsample(nn.Upsample):
"""
Upsampling via interpolation
Args:
x: (N, T, C)
Returns:
y: (N, S * T, C)
(S: scale_factor)
"""
def __init__(self, scale_factor=2, mode='nearest'):
super(Upsample, self).__init__(scale_factor=scale_factor, mode=mode)
def forward(self, x):
x = x.transpose(1, 2)
x = super(Upsample, self).forward(x)
x = x.transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
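# Editor's sketch (assumed usage, not part of the original repo; _demo_upsample is
# illustrative only): the module transposes (N, T, C) to (N, C, T), applies nearest-
# neighbour upsampling along the last axis, and transposes back, so only T is scaled.
def _demo_upsample():
    up = Upsample(scale_factor=2)
    y = up(torch.rand(4, 10, 8))  # (N=4, T=10, C=8)
    assert y.shape == (4, 20, 8)  # time axis doubled, channel axis unchanged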
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64 % 4
x3 = xindex // 256
x5 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * x2 + 16 * tmp4 + 64 * x3), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(1024)](arg0_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 8, 4, 8), (256, 8, 64, 1), 0),
class UpsampleNew(nn.Upsample):
"""
Upsampling via interpolation
Args:
x: (N, T, C)
Returns:
y: (N, S * T, C)
(S: scale_factor)
"""
def __init__(self, scale_factor=2, mode='nearest'):
super(UpsampleNew, self).__init__(scale_factor=scale_factor, mode=mode)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Jackson-Kang/VQVC-Pytorch
|
Upsample
| false
| 8,331
|
[
"MIT"
] | 13
|
d2267b5c52253b6ae11a5767963a65320ae335c2
|
https://github.com/Jackson-Kang/VQVC-Pytorch/tree/d2267b5c52253b6ae11a5767963a65320ae335c2
|
GraphConvolution
|
from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn.modules.loss
from scipy.sparse import *
def dropout(x, drop_prob, shared_axes=[], training=False):
"""
Apply dropout to input tensor.
Parameters
----------
input_tensor: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)``
Returns
-------
output: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied.
"""
if drop_prob == 0 or drop_prob is None or not training:
return x
sz = list(x.size())
for i in shared_axes:
sz[i] = 1
mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob)
mask = mask.expand_as(x)
return x * mask
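# Editor's sketch (assumed usage, not part of the original repo; _demo_shared_axis_dropout
# is illustrative only): with shared_axes=[1] the Bernoulli mask has size 1 along axis 1
# and is broadcast, so entire slices along that axis are kept or dropped together.
def _demo_shared_axis_dropout():
    y = dropout(torch.rand(2, 5, 8), drop_prob=0.5, shared_axes=[1], training=True)
    assert y.shape == (2, 5, 8)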
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight)
def forward(self, input, adj):
input = F.dropout(input, self.dropout, self.training)
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
output = self.act(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
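# Editor's sketch (assumed usage, not part of the original repo; _demo_graph_convolution is
# illustrative only): one GCN layer on a tiny graph. A dense adjacency is used here,
# mirroring get_inputs() above; torch.spmm also accepts a sparse adjacency.
def _demo_graph_convolution():
    gc = GraphConvolution(in_features=4, out_features=2)
    x = torch.rand(4, 4)   # 4 nodes, 4 input features
    adj = torch.eye(4)     # identity adjacency: each node keeps only its own features
    out = gc(x, adj)       # support = x @ W, output = relu(adj @ support)
    assert out.shape == (4, 2)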
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.nn.functional as F
from torch.nn import Parameter
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
def dropout(x, drop_prob, shared_axes=[], training=False):
"""
Apply dropout to input tensor.
Parameters
----------
input_tensor: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)``
Returns
-------
output: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied.
"""
if drop_prob == 0 or drop_prob is None or not training:
return x
sz = list(x.size())
for i in shared_axes:
sz[i] = 1
mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob)
mask = mask.expand_as(x)
return x * mask
class GraphConvolutionNew(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolutionNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
IBM/graph4nlp
|
GraphConvolution
| false
| 8,332
|
[
"Apache-2.0"
] | 18
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
https://github.com/IBM/graph4nlp/tree/a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
Context2AnswerAttention
|
import torch
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
INF = 1e20  # masking constant; assumed value added by the editor (defined elsewhere in the original repo)
class Context2AnswerAttention(nn.Module):
def __init__(self, dim, hidden_size):
super(Context2AnswerAttention, self).__init__()
self.linear_sim = nn.Linear(dim, hidden_size, bias=False)
def forward(self, context, answers, out_answers, ans_mask=None):
"""
Parameters
:context, (batch_size, L, dim)
:answers, (batch_size, N, dim)
:out_answers, (batch_size, N, dim)
:ans_mask, (batch_size, N)
Returns
:ques_emb, (batch_size, L, dim)
"""
context_fc = torch.relu(self.linear_sim(context))
questions_fc = torch.relu(self.linear_sim(answers))
attention = torch.matmul(context_fc, questions_fc.transpose(-1, -2))
if ans_mask is not None:
attention = attention.masked_fill_(1 - ans_mask.byte().
unsqueeze(1), -INF)
prob = torch.softmax(attention, dim=-1)
ques_emb = torch.matmul(prob, out_answers)
return ques_emb
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'hidden_size': 4}]
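# Editor's sketch (assumed usage, not part of the original repo; _demo_context2answer is
# illustrative only): each context position attends over the answer tokens and receives a
# weighted sum of out_answers, so the output has one vector per context position.
def _demo_context2answer():
    attn = Context2AnswerAttention(dim=8, hidden_size=8)
    context = torch.rand(2, 5, 8)          # (batch, L, dim)
    answers = torch.rand(2, 3, 8)          # (batch, N, dim)
    out = attn(context, answers, answers)  # out_answers reused as the value tensor
    assert out.shape == (2, 5, 8)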
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1)
del primals_1
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, buf9,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, buf8,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0),
out=buf7)
del buf6
return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), buf8, buf9
class Context2AnswerAttentionNew(nn.Module):
def __init__(self, dim, hidden_size):
super(Context2AnswerAttentionNew, self).__init__()
self.linear_sim = nn.Linear(dim, hidden_size, bias=False)
def forward(self, input_0, input_1, input_2):
primals_1 = self.linear_sim.weight
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
IBM/graph4nlp
|
Context2AnswerAttention
| false
| 8,333
|
[
"Apache-2.0"
] | 18
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
https://github.com/IBM/graph4nlp/tree/a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
decoder5
|
import torch
from torch import nn
class decoder5(nn.Module):
def __init__(self):
super(decoder5, self).__init__()
self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv15 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu15 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv16 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu16 = nn.ReLU(inplace=True)
self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv17 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu17 = nn.ReLU(inplace=True)
self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv18 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu18 = nn.ReLU(inplace=True)
self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv19 = nn.Conv2d(512, 256, 3, 1, 0)
self.relu19 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad20 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv20 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu20 = nn.ReLU(inplace=True)
self.reflecPad21 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv21 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu21 = nn.ReLU(inplace=True)
self.reflecPad22 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv22 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu22 = nn.ReLU(inplace=True)
self.reflecPad23 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv23 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu23 = nn.ReLU(inplace=True)
self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad24 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv24 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu24 = nn.ReLU(inplace=True)
self.reflecPad25 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv25 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu25 = nn.ReLU(inplace=True)
self.unpool4 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad26 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv26 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu26 = nn.ReLU(inplace=True)
self.reflecPad27 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv27 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, x):
out = self.reflecPad15(x)
out = self.conv15(out)
out = self.relu15(out)
out = self.unpool(out)
out = self.reflecPad16(out)
out = self.conv16(out)
out = self.relu16(out)
out = self.reflecPad17(out)
out = self.conv17(out)
out = self.relu17(out)
out = self.reflecPad18(out)
out = self.conv18(out)
out = self.relu18(out)
out = self.reflecPad19(out)
out = self.conv19(out)
out = self.relu19(out)
out = self.unpool2(out)
out = self.reflecPad20(out)
out = self.conv20(out)
out = self.relu20(out)
out = self.reflecPad21(out)
out = self.conv21(out)
out = self.relu21(out)
out = self.reflecPad22(out)
out = self.conv22(out)
out = self.relu22(out)
out = self.reflecPad23(out)
out = self.conv23(out)
out = self.relu23(out)
out = self.unpool3(out)
out = self.reflecPad24(out)
out = self.conv24(out)
out = self.relu24(out)
out = self.reflecPad25(out)
out = self.conv25(out)
out = self.relu25(out)
out = self.unpool4(out)
out = self.reflecPad26(out)
out = self.conv26(out)
out = self.relu26(out)
out = self.reflecPad27(out)
out = self.conv27(out)
return out
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
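# Editor's sketch (assumed usage, not part of the original repo; _demo_decoder5 is
# illustrative only): the decoder upsamples by 2 four times (4 -> 8 -> 16 -> 32 -> 64)
# and maps 512 feature channels down to 3 image channels.
def _demo_decoder5():
    dec = decoder5()
    y = dec(torch.rand(4, 512, 4, 4))
    assert y.shape == (4, 3, 64, 64)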
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 512
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1
))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0
))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 512
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 256
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 34 % 34
x0 = xindex % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused_arange_10(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_11(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12(in_ptr0
, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 66 % 66
x0 = xindex % 66
x4 = xindex // 4356
x2 = xindex // 4356 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_13(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = xindex // 66 % 66
x4 = xindex // 4356
x2 = xindex // 4356 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (512,), (1,))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_7, (512,), (1,))
assert_size_stride(primals_8, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (128,), (1,))
assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (128,), (1,))
assert_size_stride(primals_22, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_25, (64,), (1,))
assert_size_stride(primals_26, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_27, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0,
73728, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 4, 4), (8192, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid
(204800)](buf2, buf1, primals_3, buf3, 204800, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 8, 8), (32768, 64, 8, 1))
buf5 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf4
, primals_5, buf5, 204800, XBLOCK=1024, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 512, 8, 8), (32768, 64, 8, 1))
buf7 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf6
, primals_7, buf7, 204800, XBLOCK=1024, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 512, 8, 8), (32768, 64, 8, 1))
buf9 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf8
, primals_9, buf9, 204800, XBLOCK=1024, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 8, 8), (16384, 64, 8, 1))
buf11 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid
(331776)](buf11, buf10, primals_11, buf12, 331776, XBLOCK=512,
num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 256, 16, 16), (65536, 256, 16, 1))
buf14 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)](
buf13, primals_13, buf14, 331776, XBLOCK=512, num_warps=8,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 16, 16), (65536, 256, 16, 1))
buf16 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)](
buf15, primals_15, buf16, 331776, XBLOCK=512, num_warps=8,
num_stages=1)
buf17 = extern_kernels.convolution(buf16, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 16, 16), (65536, 256, 16, 1))
buf18 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)](
buf17, primals_17, buf18, 331776, XBLOCK=512, num_warps=8,
num_stages=1)
buf19 = extern_kernels.convolution(buf18, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf20 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf20, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid
(591872)](buf20, buf19, primals_19, buf21, 591872, XBLOCK=1024,
num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf21, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf23 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(591872)](
buf22, primals_21, buf23, 591872, XBLOCK=1024, num_warps=4,
num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_22, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf25 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_arange_10[grid(64)](buf25, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_11[grid(64)](buf26, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12[
grid(1115136)](buf26, buf24, primals_23, buf27, 1115136, XBLOCK
=512, num_warps=8, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf29 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_13[grid(1115136)](
buf28, primals_25, buf29, 1115136, XBLOCK=1024, num_warps=4,
num_stages=1)
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_14[grid(49152)](buf31, primals_27,
49152, XBLOCK=512, num_warps=4, num_stages=1)
del primals_27
buf32 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(1048576)](
buf28, primals_25, buf32, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf28
del primals_25
buf33 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(262144)](
buf24, primals_23, buf33, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf24
del primals_23
buf34 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_17[grid(524288)](
buf22, primals_21, buf34, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf22
del primals_21
buf35 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_18[grid(131072)](
buf19, primals_19, buf35, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf19
del primals_19
buf36 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)](
buf17, primals_17, buf36, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf17
del primals_17
buf37 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)](
buf15, primals_15, buf37, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf15
del primals_15
buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)](
buf13, primals_13, buf38, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_13
buf39 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_20[grid(65536)](
buf10, primals_11, buf39, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf10
del primals_11
buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)](
buf8, primals_9, buf40, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf8
del primals_9
buf41 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)](
buf6, primals_7, buf41, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf42 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)](
buf4, primals_5, buf42, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf43 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_22[grid(32768)](
buf1, primals_3, buf43, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del primals_3
return (buf31, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, primals_18, primals_20,
primals_22, primals_24, primals_26, buf0, buf2, buf3, buf5, buf7,
buf9, buf11, buf12, buf14, buf16, buf18, buf20, buf21, buf23, buf25,
buf26, buf27, buf29, buf32, buf33, buf34, buf35, buf36, buf37,
buf38, buf39, buf40, buf41, buf42, buf43)
class decoder5New(nn.Module):
def __init__(self):
super(decoder5New, self).__init__()
self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv15 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu15 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv16 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu16 = nn.ReLU(inplace=True)
self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv17 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu17 = nn.ReLU(inplace=True)
self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv18 = nn.Conv2d(512, 512, 3, 1, 0)
self.relu18 = nn.ReLU(inplace=True)
self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv19 = nn.Conv2d(512, 256, 3, 1, 0)
self.relu19 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad20 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv20 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu20 = nn.ReLU(inplace=True)
self.reflecPad21 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv21 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu21 = nn.ReLU(inplace=True)
self.reflecPad22 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv22 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu22 = nn.ReLU(inplace=True)
self.reflecPad23 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv23 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu23 = nn.ReLU(inplace=True)
self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad24 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv24 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu24 = nn.ReLU(inplace=True)
self.reflecPad25 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv25 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu25 = nn.ReLU(inplace=True)
self.unpool4 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad26 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv26 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu26 = nn.ReLU(inplace=True)
self.reflecPad27 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv27 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, input_0):
primals_2 = self.conv15.weight
primals_3 = self.conv15.bias
primals_4 = self.conv16.weight
primals_5 = self.conv16.bias
primals_6 = self.conv17.weight
primals_7 = self.conv17.bias
primals_8 = self.conv18.weight
primals_9 = self.conv18.bias
primals_10 = self.conv19.weight
primals_11 = self.conv19.bias
primals_12 = self.conv20.weight
primals_13 = self.conv20.bias
primals_14 = self.conv21.weight
primals_15 = self.conv21.bias
primals_16 = self.conv22.weight
primals_17 = self.conv22.bias
primals_18 = self.conv23.weight
primals_19 = self.conv23.bias
primals_20 = self.conv24.weight
primals_21 = self.conv24.bias
primals_22 = self.conv25.weight
primals_23 = self.conv25.bias
primals_24 = self.conv26.weight
primals_25 = self.conv26.bias
primals_26 = self.conv27.weight
primals_27 = self.conv27.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
|
Holmes-Alan/RefVAE
|
decoder5
| false
| 8,334
|
[
"MIT"
] | 13
|
836b8f1168f1b0f923b609a48e202ace7806f79c
|
https://github.com/Holmes-Alan/RefVAE/tree/836b8f1168f1b0f923b609a48e202ace7806f79c
|
LxmertAttentionOutput
|
import torch
import torch.utils.data
import torch.nn as nn
import torch
import torch.nn.parallel
class LxmertAttentionOutput(nn.Module):
def __init__(self, hidden_size, hidden_dropout_prob):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'hidden_dropout_prob': 0.5}]
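# Editor's sketch (assumed usage, not part of the original repo; _demo_lxmert_attention_output
# is illustrative only): a residual projection block. The attention output is projected,
# passed through dropout, added to the residual input_tensor, and layer-normalized over
# the last dimension. Dropout is set to 0.0 here to keep the demo deterministic.
def _demo_lxmert_attention_output():
    block = LxmertAttentionOutput(hidden_size=4, hidden_dropout_prob=0.0)
    hidden = torch.rand(2, 3, 4)
    residual = torch.rand(2, 3, 4)
    out = block(hidden, residual)
    assert out.shape == (2, 3, 4)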
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
import torch
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3,
primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del buf3
del primals_6
return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1
class LxmertAttentionOutputNew(nn.Module):
def __init__(self, hidden_size, hidden_dropout_prob):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.weight
primals_6 = self.LayerNorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
IsmaelElsharkawi/new_pororo_repo
|
LxmertAttentionOutput
| false
| 8,335
|
[
"MIT"
] | 19
|
4617083b420615b8a3eb0f44d02e4e91a8f407f7
|
https://github.com/IsmaelElsharkawi/new_pororo_repo/tree/4617083b420615b8a3eb0f44d02e4e91a8f407f7
|
MixActiv
|
import torch
import torch as th
from torch import nn
def gauss(x, mean=0, std=1):
return th.exp(-(x - mean) ** 2 / (2 * std ** 2))
class MixActiv(nn.Module):
def __init__(self):
super().__init__()
self.activations = th.sin, th.tanh, gauss, th.relu
self.n_activs = len(self.activations)
def forward(self, x):
n_chan = x.shape[1]
chans_per_activ = n_chan / self.n_activs
chan_i = 0
xs = []
for i, activ in enumerate(self.activations):
xs.append(activ(x[:, int(chan_i):int(chan_i + chans_per_activ),
:, :]))
chan_i += chans_per_activ
x = th.cat(xs, axis=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
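# Illustrative sketch (not part of the original record): with four input channels and
# the four activations above, each activation is applied to a contiguous quarter of
# the channel dimension. Shapes below are assumptions chosen only for demonstration.
def _example_usage():
    m = MixActiv()
    inp = th.randn(2, 4, 8, 8)
    out = m(inp)
    # channel 0 -> sin, channel 1 -> tanh, channel 2 -> gauss, channel 3 -> relu
    assert out.shape == inp.shape
    assert th.allclose(out[:, 0], th.sin(inp[:, 0]))
    assert th.allclose(out[:, 3], th.relu(inp[:, 3]))
    return out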
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch as th
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tmp10 = tl.full([1], 2, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tmp9 & tmp11
tmp13 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = libdevice.tanh(tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp12, tmp14, tmp15)
tmp17 = tmp0 >= tmp10
tmp18 = tl.full([1], 3, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = 0.0
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = -tmp24
tmp26 = 0.5
tmp27 = tmp25 * tmp26
tmp28 = tl_math.exp(tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tmp0 >= tmp18
tl.full([1], 4, tl.int64)
tmp34 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.full([1], 0, tl.int32)
tmp36 = triton_helpers.maximum(tmp35, tmp34)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp20, tmp30, tmp38)
tmp40 = tl.where(tmp12, tmp16, tmp39)
tmp41 = tl.where(tmp4, tmp8, tmp40)
tl.store(out_ptr0 + x3, tmp41, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def gauss(x, mean=0, std=1):
return th.exp(-(x - mean) ** 2 / (2 * std ** 2))
class MixActivNew(nn.Module):
def __init__(self):
super().__init__()
self.activations = th.sin, th.tanh, gauss, th.relu
self.n_activs = len(self.activations)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JiangZehua/control-pcgrl
|
MixActiv
| false
| 8,336
|
[
"MIT"
] | 15
|
e4fd1bf9670e5855f04941ebca34170517c451b4
|
https://github.com/JiangZehua/control-pcgrl/tree/e4fd1bf9670e5855f04941ebca34170517c451b4
|
ClassPredictor
|
import torch
from torch import nn
class ClassPredictor(nn.Module):
def __init__(self, nz_feat, max_object_classes):
super(ClassPredictor, self).__init__()
self.predictor = nn.Linear(nz_feat, max_object_classes)
def forward(self, feats):
class_logits = self.predictor(feats)
return torch.nn.functional.log_softmax(class_logits)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nz_feat': 4, 'max_object_classes': 4}]
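# Illustrative sketch (not part of the original record): a single linear layer followed
# by log_softmax. Note that ``log_softmax`` is called above without an explicit ``dim``,
# so PyTorch falls back to its (deprecated) default dimension choice.
def _example_usage():
    head = ClassPredictor(nz_feat=16, max_object_classes=10)
    feats = torch.randn(4, 16)
    log_probs = head(feats)                     # shape (4, 10)
    assert log_probs.shape == (4, 10)
    # each row sums to 1 in probability space
    assert torch.allclose(log_probs.exp().sum(dim=-1), torch.ones(4), atol=1e-5)
    return log_probs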
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class ClassPredictorNew(nn.Module):
def __init__(self, nz_feat, max_object_classes):
super(ClassPredictorNew, self).__init__()
self.predictor = nn.Linear(nz_feat, max_object_classes)
def forward(self, input_0):
primals_1 = self.predictor.weight
primals_2 = self.predictor.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JasonQSY/Associative3D
|
ClassPredictor
| false
| 8,337
|
[
"MIT"
] | 25
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
https://github.com/JasonQSY/Associative3D/tree/c50818b593ec48c38ed7ee3e109c23531089da32
|
InnerProductDecoder
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.modules.loss
from scipy.sparse import *
def dropout(x, drop_prob, shared_axes=[], training=False):
"""
Apply dropout to input tensor.
Parameters
----------
input_tensor: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)``
Returns
-------
output: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied.
"""
if drop_prob == 0 or drop_prob is None or not training:
return x
sz = list(x.size())
for i in shared_axes:
sz[i] = 1
mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob)
mask = mask.expand_as(x)
return x * mask
class InnerProductDecoder(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoder, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.mm(z, z.t()))
return adj
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5}]
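# Illustrative sketch (not part of the original record): the decoder reconstructs a
# dense adjacency matrix from node embeddings z via sigmoid(z @ z.T), so entry (i, j)
# is the predicted edge probability between nodes i and j. Sizes are assumptions.
def _example_usage():
    dec = InnerProductDecoder(dropout=0.0)
    z = torch.randn(5, 16)        # 5 nodes, 16-dim embeddings
    adj = dec(z)                  # shape (5, 5), values in (0, 1)
    assert adj.shape == (5, 5)
    return adj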
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
del arg0_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(16)](buf1, 16, XBLOCK=16, num_warps
=1, num_stages=1)
return buf1,
def dropout(x, drop_prob, shared_axes=[], training=False):
"""
Apply dropout to input tensor.
Parameters
----------
input_tensor: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)``
Returns
-------
output: ``torch.FloatTensor``
A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied.
"""
if drop_prob == 0 or drop_prob is None or not training:
return x
sz = list(x.size())
for i in shared_axes:
sz[i] = 1
mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob)
mask = mask.expand_as(x)
return x * mask
class InnerProductDecoderNew(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoderNew, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
IBM/graph4nlp
|
InnerProductDecoder
| false
| 8,338
|
[
"Apache-2.0"
] | 18
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
https://github.com/IBM/graph4nlp/tree/a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
GRUStep
|
import torch
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
class GRUStep(nn.Module):
def __init__(self, hidden_size, input_size):
        """GRU module"""
        super(GRUStep, self).__init__()
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
def forward(self, h_state, input):
z = torch.sigmoid(self.linear_z(torch.cat([h_state, input], -1)))
r = torch.sigmoid(self.linear_r(torch.cat([h_state, input], -1)))
t = torch.tanh(self.linear_t(torch.cat([r * h_state, input], -1)))
h_state = (1 - z) * h_state + z * t
return h_state
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'input_size': 4}]
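# Illustrative sketch (not part of the original record): one GRU-style update step.
# h_state and input are concatenated to form the update gate z and reset gate r; the
# candidate state t is built from the reset-scaled hidden state, and the new hidden
# state interpolates between the old state and the candidate. Sizes are assumptions.
def _example_usage():
    cell = GRUStep(hidden_size=8, input_size=16)
    h = torch.zeros(2, 8)
    x = torch.randn(2, 16)
    h_next = cell(h, x)           # shape (2, 8), same as h
    assert h_next.shape == h.shape
    return h_next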
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](buf2, primals_1, primals_2, buf3,
512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(256)](buf1,
primals_1, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf5, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf1, buf2, reinterpret_tensor(buf3, (64, 8), (8, 1), 0
), buf4, primals_5
class GRUStepNew(nn.Module):
def __init__(self, hidden_size, input_size):
        """GRU module"""
        super(GRUStepNew, self).__init__()
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
def forward(self, input_0, input_1):
primals_3 = self.linear_z.weight
primals_4 = self.linear_r.weight
primals_5 = self.linear_t.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
IBM/graph4nlp
|
GRUStep
| false
| 8,339
|
[
"Apache-2.0"
] | 18
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
https://github.com/IBM/graph4nlp/tree/a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
ScalePredictor
|
import torch
from torch import nn
class ScalePredictor(nn.Module):
def __init__(self, nz):
super(ScalePredictor, self).__init__()
self.pred_layer = nn.Linear(nz, 3)
def forward(self, feat):
scale = self.pred_layer.forward(feat) + 1
scale = torch.nn.functional.relu(scale) + 1e-12
return scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nz': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = 0.0
tmp10 = tmp6 <= tmp9
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (3, 4), (4, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 3), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(192)](buf0,
primals_2, buf1, buf2, 192, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class ScalePredictorNew(nn.Module):
def __init__(self, nz):
super(ScalePredictorNew, self).__init__()
self.pred_layer = nn.Linear(nz, 3)
def forward(self, input_0):
primals_1 = self.pred_layer.weight
primals_2 = self.pred_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JasonQSY/Associative3D
|
ScalePredictor
| false
| 8,340
|
[
"MIT"
] | 25
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
https://github.com/JasonQSY/Associative3D/tree/c50818b593ec48c38ed7ee3e109c23531089da32
|
CombinedTargetMSELoss
|
import torch
import torch.nn as nn
class CombinedTargetMSELoss(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_channels = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split(
1, 1)
heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1
)
loss = 0.0
num_joints = num_channels // 3
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx * 3].squeeze()
heatmap_gt = heatmaps_gt[idx * 3].squeeze()
offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()
offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()
offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()
offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()
if self.use_target_weight:
heatmap_pred = heatmap_pred * target_weight[:, idx]
heatmap_gt = heatmap_gt * target_weight[:, idx]
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,
heatmap_gt * offset_x_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,
heatmap_gt * offset_y_gt)
return loss / num_joints * self.loss_weight
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'use_target_weight': 4}]
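# Illustrative sketch (not part of the original record): the channel axis is expected
# to hold 3 maps per joint in the order [heatmap, offset_x, offset_y]. The dummy
# shapes below are assumptions chosen only for demonstration.
def _example_usage():
    loss_fn = CombinedTargetMSELoss(use_target_weight=True)
    batch, num_joints, h, w = 2, 5, 8, 8
    output = torch.rand(batch, num_joints * 3, h, w)   # 3 channels per joint
    target = torch.rand(batch, num_joints * 3, h, w)
    target_weight = torch.rand(batch, num_joints, 1)
    return loss_fn(output, target, target_weight)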
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tmp42 = tmp41 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf3, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class CombinedTargetMSELossNew(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Jackqu/mmpose
|
CombinedTargetMSELoss
| false
| 8,341
|
[
"Apache-2.0"
] | 38
|
ad8acc5ff5da7993c6befdc4b1ced2c2ecb64533
|
https://github.com/Jackqu/mmpose/tree/ad8acc5ff5da7993c6befdc4b1ced2c2ecb64533
|
RelativeScalePredictor
|
import torch
from torch import nn
class RelativeScalePredictor(nn.Module):
def __init__(self, in_size, out_size):
super(RelativeScalePredictor, self).__init__()
self.predictor = nn.Linear(in_size, out_size)
def forward(self, feat):
predictions = self.predictor.forward(feat) + 1
predictions = torch.nn.functional.relu(predictions) + 1e-12
return predictions.log()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'out_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_log_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 1e-12
tmp6 = tmp4 + tmp5
tmp7 = tl_math.log(tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_log_relu_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class RelativeScalePredictorNew(nn.Module):
def __init__(self, in_size, out_size):
super(RelativeScalePredictorNew, self).__init__()
self.predictor = nn.Linear(in_size, out_size)
def forward(self, input_0):
primals_1 = self.predictor.weight
primals_2 = self.predictor.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JasonQSY/Associative3D
|
RelativeScalePredictor
| false
| 8,342
|
[
"MIT"
] | 25
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
https://github.com/JasonQSY/Associative3D/tree/c50818b593ec48c38ed7ee3e109c23531089da32
|
UpSample
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class UpSample(nn.Sequential):
def __init__(self, skip_input, output_features):
super(UpSample, self).__init__()
self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3,
stride=1, padding=1)
self.leakyreluA = nn.LeakyReLU(0.2)
self.convB = nn.Conv2d(output_features, output_features,
kernel_size=3, stride=1, padding=1)
self.leakyreluB = nn.LeakyReLU(0.2)
def forward(self, x, concat_with):
up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size
(3)], mode='bilinear', align_corners=True)
return self.leakyreluB(self.convB(self.leakyreluA(self.convA(torch.
cat([up_x, concat_with], dim=1)))))
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {'skip_input': 4, 'output_features': 4}]
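# Illustrative sketch (not part of the original record): x is bilinearly upsampled to
# the spatial size of concat_with, the two are concatenated along the channel axis,
# and two 3x3 conv + LeakyReLU stages follow, so skip_input must equal
# x channels + concat_with channels. Sizes below are assumptions.
def _example_usage():
    up = UpSample(skip_input=64 + 32, output_features=64)
    x = torch.randn(1, 64, 8, 8)            # low-resolution feature map
    skip = torch.randn(1, 32, 16, 16)       # encoder skip connection
    out = up(x, skip)                       # shape (1, 64, 16, 16)
    assert out.shape == (1, 64, 16, 16)
    return out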
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex // 48
x7 = xindex % 48
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = triton_helpers.minimum(tmp23, tmp2)
tmp25 = tmp20 * tmp24
tmp26 = tmp16 + tmp25
tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp24
tmp31 = tmp27 + tmp30
tmp32 = tmp26 - tmp31
tmp33 = tmp6.to(tl.float32)
tmp34 = tmp5 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = triton_helpers.minimum(tmp35, tmp2)
tmp37 = tmp32 * tmp36
tmp38 = tmp31 + tmp37
tl.store(out_ptr1 + (x7 + 64 * x4), tmp38, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf3, (4, 3, 4, 4), (64, 16, 4, 1), 0)
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(192)](primals_2, buf1, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (64, 16, 4, 1), 48)
triton_poi_fused_cat_1[grid(64)](primals_1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf4,
primals_4, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf9 = buf4
del buf4
triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf7,
primals_6, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del primals_6
return buf9, primals_3, primals_5, buf3, buf5, buf6, buf8
class UpSampleNew(nn.Sequential):
def __init__(self, skip_input, output_features):
super(UpSampleNew, self).__init__()
self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3,
stride=1, padding=1)
self.leakyreluA = nn.LeakyReLU(0.2)
self.convB = nn.Conv2d(output_features, output_features,
kernel_size=3, stride=1, padding=1)
self.leakyreluB = nn.LeakyReLU(0.2)
def forward(self, input_0, input_1):
primals_3 = self.convA.weight
primals_4 = self.convA.bias
primals_5 = self.convB.weight
primals_6 = self.convB.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
JanRocketMan/regression-prior-networks
|
UpSample
| false
| 8,343
|
[
"MIT"
] | 24
|
3c8ffa758ee6eaa15b8afe31ac1c03f87bbf6a14
|
https://github.com/JanRocketMan/regression-prior-networks/tree/3c8ffa758ee6eaa15b8afe31ac1c03f87bbf6a14
|
BasicConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, bias=
False, **kwargs)
def forward(self, x):
x = self.conv(x)
return F.leaky_relu(x, inplace=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.01
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = tmp5 > tmp1
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(16)](buf1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf1, primals_1, primals_2, buf2
class BasicConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super(BasicConv2dNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, bias=
False, **kwargs)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
JinkaiZheng/TraND
|
BasicConv2d
| false
| 8,344
|
[
"MIT"
] | 33
|
a8babc34073ee126789969bd97e149bae4015953
|
https://github.com/JinkaiZheng/TraND/tree/a8babc34073ee126789969bd97e149bae4015953
|
TransNonlinear
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class TransNonlinear(nn.Module):
def __init__(self, d_model, dim_feedforward, dropout=0.1):
super().__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = nn.ReLU()
def forward(self, src):
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'dim_feedforward': 4}]
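# Illustrative sketch (not part of the original record): a position-wise feed-forward
# block with a residual connection followed by LayerNorm, applied over the last
# dimension of the input. Sizes below are assumptions chosen only for demonstration.
def _example_usage():
    block = TransNonlinear(d_model=16, dim_feedforward=32, dropout=0.0)
    src = torch.randn(4, 10, 16)
    out = block(src)              # same shape as src
    assert out.shape == src.shape
    return out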
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf2,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](primals_3, buf2,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 4), (4,
1), 0), buf2, primals_4, buf6
class TransNonlinearNew(nn.Module):
def __init__(self, d_model, dim_feedforward, dropout=0.1):
super().__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.norm2.weight
primals_7 = self.norm2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Jasonkks/PTTR
|
TransNonlinear
| false
| 8,345
|
[
"Apache-2.0"
] | 14
|
11f664a7f1b2281293d82a5450fdd3d4bfa5883e
|
https://github.com/Jasonkks/PTTR/tree/11f664a7f1b2281293d82a5450fdd3d4bfa5883e
|
LxmertAttention
|
import math
import torch
import torch.utils.data
import torch.nn as nn
import torch
import torch.nn.parallel
class LxmertAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads,
attention_probs_dropout_prob, ctx_dim=None):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (hidden_size, num_attention_heads))
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.head_size = self.num_attention_heads * self.attention_head_size
if ctx_dim is None:
ctx_dim = hidden_size
self.query = nn.Linear(hidden_size, self.head_size)
self.key = nn.Linear(ctx_dim, self.head_size)
self.value = nn.Linear(ctx_dim, self.head_size)
self.dropout = nn.Dropout(attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None,
output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
attention_mask = attention_mask.unsqueeze(1).unsqueeze(1).repeat(
1, attention_scores.shape[1], attention_scores.shape[2], 1)
attention_scores.data.masked_fill_(attention_mask.data > 0, -
float('inf'))
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_attention_heads': 4,
'attention_probs_dropout_prob': 0.5}]
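# Illustrative sketch (not part of the original record): cross-attention between a
# query sequence (hidden_states) and a context sequence. The head split in
# transpose_for_scores turns (batch, seq, hidden) into (batch, heads, seq, head_dim).
# Sizes below are assumptions chosen only for demonstration.
def _example_usage():
    attn = LxmertAttention(hidden_size=8, num_attention_heads=2,
                           attention_probs_dropout_prob=0.0)
    hidden_states = torch.randn(3, 5, 8)    # (batch, query_len, hidden)
    context = torch.randn(3, 7, 8)          # (batch, ctx_len, hidden)
    out, = attn(hidden_states, context)     # (batch, query_len, hidden)
    assert out.shape == (3, 5, 8)
    return out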
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.nn as nn
import torch
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf9
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class LxmertAttentionNew(nn.Module):
def __init__(self, hidden_size, num_attention_heads,
attention_probs_dropout_prob, ctx_dim=None):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (hidden_size, num_attention_heads))
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.head_size = self.num_attention_heads * self.attention_head_size
if ctx_dim is None:
ctx_dim = hidden_size
self.query = nn.Linear(hidden_size, self.head_size)
self.key = nn.Linear(ctx_dim, self.head_size)
self.value = nn.Linear(ctx_dim, self.head_size)
self.dropout = nn.Dropout(attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_7 = self.value.weight
primals_8 = self.value.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
IsmaelElsharkawi/new_pororo_repo
|
LxmertAttention
| false
| 8,346
|
[
"MIT"
] | 19
|
4617083b420615b8a3eb0f44d02e4e91a8f407f7
|
https://github.com/IsmaelElsharkawi/new_pororo_repo/tree/4617083b420615b8a3eb0f44d02e4e91a8f407f7
|
Downsample
|
import torch
class Downsample(torch.nn.Module):
def __init__(self, s, use_max=False, batch_mode=False):
super(Downsample, self).__init__()
self.batch_mode = batch_mode
if use_max:
layer = torch.nn.MaxPool3d(s, stride=s)
else:
layer = torch.nn.Conv3d(1, 1, s, stride=s)
layer.weight.data.fill_(1.0 / layer.weight.data.nelement())
layer.bias.data.fill_(0)
self.layer = layer
def forward(self, vol):
if self.batch_mode:
out_vol = self.layer.forward(vol)
else:
out_vol = self.layer.forward(torch.unsqueeze(torch.unsqueeze(
vol, 0), 0))[0, 0]
return out_vol
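# Illustrative usage sketch (added for clarity, not part of the source repo):
# in the default non-batch mode a raw [D, H, W] volume is wrapped to
# [1, 1, D, H, W] so Conv3d/MaxPool3d can run, then the singleton dims are
# stripped again. The averaging conv has all-equal weights and zero bias.
def _example_downsample_usage():
    vol = torch.rand(8, 8, 8)
    down = Downsample(s=2)
    out = down(vol)
    return out.shape  # torch.Size([4, 4, 4])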
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'s': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_out_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr0 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp4, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4, 4, 4), (64, 64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
1, 4, 4, 4), (64, 64, 16, 4, 1), 0), primals_2, stride=(4, 4, 4
), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1)](buf1, primals_3, 1, XBLOCK=
1, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf1, (1, 1, 1), (1, 1, 1), 0
), primals_2, reinterpret_tensor(primals_1, (1, 1, 4, 4, 4), (64,
64, 16, 4, 1), 0)
class DownsampleNew(torch.nn.Module):
def __init__(self, s, use_max=False, batch_mode=False):
super(DownsampleNew, self).__init__()
self.batch_mode = batch_mode
if use_max:
layer = torch.nn.MaxPool3d(s, stride=s)
else:
layer = torch.nn.Conv3d(1, 1, s, stride=s)
layer.weight.data.fill_(1.0 / layer.weight.data.nelement())
layer.bias.data.fill_(0)
self.layer = layer
def forward(self, input_0):
primals_2 = self.layer.weight
primals_3 = self.layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JasonQSY/Associative3D
|
Downsample
| false
| 8,347
|
[
"MIT"
] | 25
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
https://github.com/JasonQSY/Associative3D/tree/c50818b593ec48c38ed7ee3e109c23531089da32
|
MeanEmbedding
|
import torch
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
class MeanEmbedding(nn.Module):
"""Mean embedding class.
"""
def __init__(self):
super(MeanEmbedding, self).__init__()
def forward(self, emb, len_):
"""Compute average embeddings.
Parameters
----------
emb : torch.Tensor
The input embedding tensor.
len_ : torch.Tensor
The sequence length tensor.
Returns
-------
torch.Tensor
The average embedding tensor.
"""
return torch.sum(emb, dim=-2) / len_
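# Minimal usage sketch (assumed calling convention, not from the source repo):
# the sum over the sequence axis is divided by the true lengths, which gives a
# padding-aware mean provided padded positions in `emb` are already zeroed.
def _example_mean_embedding():
    emb = torch.rand(2, 5, 8)            # [batch, seq_len, dim]
    len_ = torch.tensor([[3.0], [5.0]])  # true lengths, broadcast over dim
    return MeanEmbedding()(emb, len_)    # [batch, dim]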
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MeanEmbeddingNew(nn.Module):
"""Mean embedding class.
"""
def __init__(self):
super(MeanEmbeddingNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
IBM/graph4nlp
|
MeanEmbedding
| false
| 8,348
|
[
"Apache-2.0"
] | 18
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
https://github.com/IBM/graph4nlp/tree/a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
GatedFusion
|
import torch
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
class GatedFusion(nn.Module):
def __init__(self, hidden_size):
super(GatedFusion, self).__init__()
"""GatedFusion module"""
self.fc_z = nn.Linear(4 * hidden_size, hidden_size, bias=True)
def forward(self, h_state, input):
z = torch.sigmoid(self.fc_z(torch.cat([h_state, input, h_state *
input, h_state - input], -1)))
h_state = (1 - z) * h_state + z * input
return h_state
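# Minimal usage sketch (illustrative only): the gate z is a sigmoid over the
# concatenation [h, x, h * x, h - x] and blends the two inputs element-wise,
# h' = (1 - z) * h + z * x.
def _example_gated_fusion():
    fuse = GatedFusion(hidden_size=8)
    h = torch.rand(2, 8)
    x = torch.rand(2, 8)
    return fuse(h, x)  # same shape as h: (2, 8)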
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 - tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = tmp1 * tmp6
tmp8 = tmp5 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1024)](primals_1, primals_2, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_1[grid(256)](buf1, primals_1,
primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, reinterpret_tensor(buf0, (64, 16), (
16, 1), 0), buf1
class GatedFusionNew(nn.Module):
def __init__(self, hidden_size):
super(GatedFusionNew, self).__init__()
"""GatedFusion module"""
self.fc_z = nn.Linear(4 * hidden_size, hidden_size, bias=True)
def forward(self, input_0, input_1):
primals_3 = self.fc_z.weight
primals_4 = self.fc_z.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
IBM/graph4nlp
|
GatedFusion
| false
| 8,349
|
[
"Apache-2.0"
] | 18
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
https://github.com/IBM/graph4nlp/tree/a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
IDPredictor
|
import torch
import torch.nn.functional as F
from torch import nn
class IDPredictor(nn.Module):
def __init__(self, nz_feat, n_dim=5):
super(IDPredictor, self).__init__()
self.pred_layer = nn.Linear(nz_feat, 256)
self.sc_layer = nn.Linear(256, 128)
self.sc_layer2 = nn.Linear(128, 64)
def forward(self, feat):
pred = self.pred_layer.forward(feat)
pred = F.relu(pred)
pred = self.sc_layer.forward(pred)
pred = F.relu(pred)
pred = self.sc_layer2.forward(pred)
return pred
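# Minimal usage sketch (illustrative, not from the source repo): a three-layer
# MLP mapping nz_feat -> 256 -> 128 -> 64 with ReLU after the first two layers.
def _example_id_predictor():
    net = IDPredictor(nz_feat=32)
    feat = torch.rand(5, 32)
    return net(feat)  # (5, 64)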
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nz_feat': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128), (128, 1))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), primals_6, buf5, primals_4, buf6
class IDPredictorNew(nn.Module):
def __init__(self, nz_feat, n_dim=5):
super(IDPredictorNew, self).__init__()
self.pred_layer = nn.Linear(nz_feat, 256)
self.sc_layer = nn.Linear(256, 128)
self.sc_layer2 = nn.Linear(128, 64)
def forward(self, input_0):
primals_1 = self.pred_layer.weight
primals_2 = self.pred_layer.bias
primals_4 = self.sc_layer.weight
primals_5 = self.sc_layer.bias
primals_6 = self.sc_layer2.weight
primals_7 = self.sc_layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
JasonQSY/Associative3D
|
IDPredictor
| false
| 8,350
|
[
"MIT"
] | 25
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
https://github.com/JasonQSY/Associative3D/tree/c50818b593ec48c38ed7ee3e109c23531089da32
|
ConvModule
|
import torch
import torch.utils.data
import torch.nn as nn
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
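# Minimal usage sketch (hypothetical configuration, not from the source repo):
# reflection padding keeps the spatial size constant, the conv drops its bias
# when a norm layer is present, and the registered children run in order
# (pad -> conv -> norm -> activation).
def _example_conv_module():
    block = ConvModule(3, 16, ksize=3, padding_mode='reflection',
        activation='relu', norm_layer='instance')
    x = torch.rand(2, 3, 32, 32)
    return block(x)  # (2, 16, 32, 32)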
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModuleNew(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModuleNew, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
IlyaBizyaev/ttools
|
ConvModule
| false
| 8,351
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
Normalize
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class Normalize(nn.Module):
def __init__(self, p=2):
super(Normalize, self).__init__()
self.p = p
def forward(self, x):
return F.normalize(x, p=self.p, dim=1)
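# Minimal usage sketch (illustrative only): every channel vector along dim=1 is
# divided by its L-p norm (clamped at 1e-12 by F.normalize), so each spatial
# location ends up with a unit-norm feature vector.
def _example_normalize():
    x = torch.rand(2, 4, 3, 3)
    y = Normalize(p=2)(x)
    return y.pow(2).sum(dim=1)  # approximately all ones, shape (2, 3, 3)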
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
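    # Fused F.normalize(p=2, dim=1) for a (4, 4, 4, 4) input: for each
    # (batch, spatial) position the four channel values are loaded, their L2
    # norm is clamped at 1e-12, and the current element is divided by it.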
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
def __init__(self, p=2):
super(NormalizeNew, self).__init__()
self.p = p
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JindongGu/SimDis
|
Normalize
| false
| 8,352
|
[
"MIT"
] | 12
|
0871a217a756acc268f35f802e35b01b12817f0d
|
https://github.com/JindongGu/SimDis/tree/0871a217a756acc268f35f802e35b01b12817f0d
|
MultiHeadAttn
|
import torch
import torch.nn.functional as F
from torch import nn
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, h, attn_mask=None, mems=None):
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float
('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf')
)
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.
size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = h + attn_out
else:
output = self.layer_norm(h + attn_out)
return output
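# Minimal usage sketch (illustrative only): inputs are [seq_len, batch, d_model];
# queries come from `h` while keys/values come from the optional memory `mems`
# concatenated with `h`, and scores are scaled by 1 / sqrt(d_head).
def _example_multi_head_attn():
    attn = MultiHeadAttn(n_head=2, d_model=8, d_head=4, dropout=0.0)
    h = torch.rand(5, 3, 8)     # [seq_len, batch, d_model]
    mems = torch.rand(7, 3, 8)  # earlier segment kept as memory
    return attn(h, mems=mems)   # (5, 3, 8)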
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 32 * y1 + 128 * x2), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 32 * x3 + 128 * x1), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_2[grid(16, 16)](buf4, buf5, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(256)](buf1, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf9,
buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_6
return buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8, (16,
16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (4, 1, 64), 0
), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)
class MultiHeadAttnNew(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttnNew, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, input_0):
primals_2 = self.q_net.weight
primals_3 = self.kv_net.weight
primals_4 = self.o_net.weight
primals_5 = self.layer_norm.weight
primals_6 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
JasonBenn/duet
|
MultiHeadAttn
| false
| 8,353
|
[
"Apache-2.0"
] | 11
|
0d6f1f66fad097023b022f2a361a1587d0f740ba
|
https://github.com/JasonBenn/duet/tree/0d6f1f66fad097023b022f2a361a1587d0f740ba
|
PositionalWiseFeedForward
|
import math
import torch
import torch.nn as nn
class GELU(nn.Module):
"""
    This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class PositionalWiseFeedForward(nn.Module):
def __init__(self, model_dim=512, ffn_dim=2048, dropout=0.0):
super(PositionalWiseFeedForward, self).__init__()
self.w1 = nn.Conv1d(model_dim, ffn_dim, 1)
self.w2 = nn.Conv1d(ffn_dim, model_dim, 1)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(model_dim)
self.gelu = GELU()
def forward(self, x):
"""
:param x: [b, t, d*h]
:return:
"""
output = x.transpose(1, 2)
output = self.w2(self.gelu(self.w1(output)))
output = self.dropout(output.transpose(1, 2))
output = self.layer_norm(x + output)
return output
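# Minimal usage sketch (illustrative only): the two 1x1 convolutions act as a
# position-wise MLP over the feature dimension; the residual connection and
# LayerNorm are applied on the original [b, t, d] layout.
def _example_positional_wise_ffn():
    ffn = PositionalWiseFeedForward(model_dim=16, ffn_dim=64, dropout=0.0)
    x = torch.rand(2, 10, 16)  # [batch, time, model_dim]
    return ffn(x)              # (2, 10, 16)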
def get_inputs():
return [torch.rand([4, 512, 512])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = yindex // 512
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 262144 * y1), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 512 * y3), tmp0, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_pow_tanh_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
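    # Fuses the w1 bias add with the tanh-approximation GELU,
    # 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))),
    # where 0.7978845608028654 is sqrt(2 / pi); the biased activation is
    # written back in place and the GELU output goes to out_ptr0.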
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 512 % 2048
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp2 * tmp2
tmp6 = tmp5 * tmp2
tmp7 = 0.044715
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tmp10 = 0.7978845608028654
tmp11 = tmp9 * tmp10
tmp12 = libdevice.tanh(tmp11)
tmp13 = 1.0
tmp14 = tmp12 + tmp13
tmp15 = tmp4 * tmp14
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 512 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_red_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 512
x2 = xindex // 2048
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r3 + 128 * x4), rmask, eviction_policy=
'evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (x1 + 512 * r3 + 65536 * x0 + 262144 * x2),
rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask, tmp4_weight_next, tmp4_weight)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + x4, tmp4, None)
tl.store(out_ptr1 + x4, tmp5, None)
tl.store(out_ptr2 + x4, tmp6, None)
@triton.jit
def triton_per_fused_add_native_layer_norm_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), None)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), None)
tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), None)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7, tmp8, tmp9 = triton_helpers.welford(tmp3, tmp4, tmp5, 1)
tmp10 = tmp7[:, None]
tmp11 = tmp8[:, None]
tmp9[:, None]
tmp13 = 512.0
tmp14 = tmp11 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp17, None)
tl.store(out_ptr0 + x0, tmp10, None)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 512 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + 512 * x2 + 262144 * y1), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y3, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2 + 512 * y3), tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 512), (262144, 512, 1))
assert_size_stride(primals_2, (2048, 512, 1), (512, 1, 1))
assert_size_stride(primals_3, (2048,), (1,))
assert_size_stride(primals_4, (512, 2048, 1), (2048, 1, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (512,), (1,))
assert_size_stride(primals_7, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 512), (262144, 512, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(2048, 512)](primals_1, buf0,
2048, 512, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 2048, 512), (1048576, 512, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 2048, 512), (1048576, 512, 1), torch.
float32)
triton_poi_fused_add_convolution_mul_pow_tanh_1[grid(4194304)](buf2,
primals_3, buf3, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 512), (262144, 512, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(1048576)](buf5, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 512, 1, 4), (2048, 4, 8192, 1), torch
.float32)
buf7 = empty_strided_cuda((4, 512, 1, 4), (2048, 4, 8192, 1), torch
.float32)
buf8 = empty_strided_cuda((4, 512, 1, 4), (2048, 4, 8192, 1), torch
.float32)
triton_red_fused_add_native_layer_norm_3[grid(8192)](primals_1,
buf5, buf6, buf7, buf8, 8192, 128, XBLOCK=64, RBLOCK=8,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 512, 1), (512, 1, 1), torch.float32)
buf10 = empty_strided_cuda((4, 512, 1), (512, 1, 2048), torch.float32)
buf12 = reinterpret_tensor(buf10, (4, 512, 1), (512, 1, 1), 0)
del buf10
triton_per_fused_add_native_layer_norm_4[grid(2048)](buf12, buf6,
buf7, buf8, buf9, 2048, 4, XBLOCK=32, num_warps=2, num_stages=1)
del buf6
del buf7
del buf8
buf13 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_5[grid(2048, 512)](primals_1,
buf5, buf9, buf12, primals_6, primals_7, buf13, 2048, 512,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_7
return (buf13, primals_1, primals_2, primals_4, primals_6, buf2, buf3,
buf5, buf9, buf12)
class GELU(nn.Module):
"""
    This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class PositionalWiseFeedForwardNew(nn.Module):
def __init__(self, model_dim=512, ffn_dim=2048, dropout=0.0):
super(PositionalWiseFeedForwardNew, self).__init__()
self.w1 = nn.Conv1d(model_dim, ffn_dim, 1)
self.w2 = nn.Conv1d(ffn_dim, model_dim, 1)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(model_dim)
self.gelu = GELU()
def forward(self, input_0):
primals_2 = self.w1.weight
primals_3 = self.w1.bias
primals_4 = self.w2.weight
primals_5 = self.w2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
JiaweiSheng/FAAN
|
PositionalWiseFeedForward
| false
| 8,354
|
[
"MIT"
] | 41
|
b439b829506c4e2e9044a6b2ab7f3d844f445a95
|
https://github.com/JiaweiSheng/FAAN/tree/b439b829506c4e2e9044a6b2ab7f3d844f445a95
|
SelfAttention
|
import torch
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
class SelfAttention(nn.Module):
def __init__(self, input_size, hidden_size):
super(SelfAttention, self).__init__()
self.W1 = torch.Tensor(input_size, hidden_size)
self.W1 = nn.Parameter(nn.init.xavier_uniform_(self.W1))
self.W2 = torch.Tensor(hidden_size, 1)
self.W2 = nn.Parameter(nn.init.xavier_uniform_(self.W2))
def forward(self, x, attention_mask=None):
attention = torch.mm(torch.tanh(torch.mm(x.view(-1, x.size(-1)),
self.W1)), self.W2).view(x.size(0), -1)
if attention_mask is not None:
            attention = attention.masked_fill_(attention_mask == 0, float('-inf'))
probs = torch.softmax(attention, dim=-1).unsqueeze(1)
weighted_x = torch.bmm(probs, x).squeeze(1)
return weighted_x
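# Minimal usage sketch (illustrative only): scores tanh(x W1) W2 are
# softmax-normalised over the sequence axis and used as weights for a single
# pooled vector per sample.
def _example_self_attention_pooling():
    pool = SelfAttention(input_size=8, hidden_size=16)
    x = torch.rand(3, 5, 8)  # [batch, seq_len, input_size]
    return pool(x)           # (3, 8)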
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 0, 1), 0
), primals_1, out=buf5)
del buf4
return reinterpret_tensor(buf5, (4, 4), (4, 1), 0
), primals_1, buf1, buf2, reinterpret_tensor(primals_3, (1, 4), (1,
1), 0)
class SelfAttentionNew(nn.Module):
def __init__(self, input_size, hidden_size):
super(SelfAttentionNew, self).__init__()
self.W1 = torch.Tensor(input_size, hidden_size)
self.W1 = nn.Parameter(nn.init.xavier_uniform_(self.W1))
self.W2 = torch.Tensor(hidden_size, 1)
self.W2 = nn.Parameter(nn.init.xavier_uniform_(self.W2))
def forward(self, input_0):
primals_2 = self.W1
primals_3 = self.W2
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
IBM/graph4nlp
|
SelfAttention
| false
| 8,355
|
[
"Apache-2.0"
] | 18
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
https://github.com/IBM/graph4nlp/tree/a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
ScaledDotProductAttention
|
import torch
import numpy as np
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention
"""
def __init__(self, attn_dropout=0.0):
super(ScaledDotProductAttention, self).__init__()
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, scale=None, attn_mask=None):
"""
:param attn_mask: [batch, time]
:param scale:
:param q: [batch, time, dim]
:param k: [batch, time, dim]
:param v: [batch, time, dim]
:return:
"""
attn = torch.bmm(q, k.transpose(1, 2))
if scale:
attn = attn * scale
if attn_mask:
attn = attn.masked_fill_(attn_mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
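# --- Hedged usage sketch (editor addition, not part of the original repo) ---
# Exercises the q/k/v call described in the docstring above on the same shapes
# as get_inputs(); the 1/sqrt(dim) scale is an assumption, chosen to match the
# usual scaled dot-product convention, not something the repo mandates.
if __name__ == '__main__':
    attn_layer = ScaledDotProductAttention(attn_dropout=0.1)
    q = torch.rand(4, 4, 4)  # [batch, time, dim]
    k = torch.rand(4, 4, 4)
    v = torch.rand(4, 4, 4)
    out, attn = attn_layer(q, k, v, scale=1.0 / 4 ** 0.5)
    print(out.shape, attn.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4])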
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention
"""
def __init__(self, attn_dropout=0.0):
super(ScaledDotProductAttentionNew, self).__init__()
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
JiaweiSheng/FAAN
|
ScaledDotProductAttention
| false
| 8,356
|
[
"MIT"
] | 41
|
b439b829506c4e2e9044a6b2ab7f3d844f445a95
|
https://github.com/JiaweiSheng/FAAN/tree/b439b829506c4e2e9044a6b2ab7f3d844f445a95
|
MLP
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
def __init__(self, input_dim, output_dim, hidden_dim=128):
""" 初始化q网络,为全连接网络
input_dim: 输入的特征数即环境的状态维度
output_dim: 输出的动作维度
"""
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
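# --- Hedged usage sketch (editor addition, not part of the original repo) ---
# Treats the MLP as the Q-network its docstring describes: states in, one
# Q-value per action out. The 4-dim state / 2-action sizes are illustrative.
if __name__ == '__main__':
    q_net = MLP(input_dim=4, output_dim=2)
    states = torch.rand(8, 4)           # batch of 8 environment states
    q_values = q_net(states)            # shape (8, 2), one value per action
    greedy_actions = q_values.argmax(dim=1)
    print(q_values.shape, greedy_actions.shape)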
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf6, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), primals_6, buf5, primals_4, buf6
class MLPNew(nn.Module):
def __init__(self, input_dim, output_dim, hidden_dim=128):
""" 初始化q网络,为全连接网络
input_dim: 输入的特征数即环境的状态维度
output_dim: 输出的动作维度
"""
super(MLPNew, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, output_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
JohnJim0816/rl-tutorials
|
MLP
| false
| 8,357
|
[
"MIT"
] | 16
|
e99daea815da85f9f25dff2d01b030249a203d22
|
https://github.com/JohnJim0816/rl-tutorials/tree/e99daea815da85f9f25dff2d01b030249a203d22
|
FixupBasicBlock
|
import torch
import torch as th
import torch.utils.data
import torch.nn as nn
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, padding_mode='zero',
activation='relu'):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
        self.ksize = ksize
self.pad = pad
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.activation(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
crop = (self.ksize - 1) // 2 * 2
if crop > 0 and not self.pad:
identity = identity[:, :, crop:-crop, crop:-crop]
out += identity
out = self.activation2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4}]
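# --- Hedged usage sketch (editor addition, not part of the original repo) ---
# Shows the block on a feature map larger than the traced 4x4x4x4 input; with
# the default pad=True the spatial size is preserved through both convolutions
# and the Fixup-style biases/scale. Sizes here are illustrative.
if __name__ == '__main__':
    block = FixupBasicBlock(n_features=8, ksize=3, pad=True)
    x = th.rand(2, 8, 16, 16)
    y = block(x)
    print(y.shape)  # torch.Size([2, 8, 16, 16])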
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch as th
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp10 = tmp7 + tmp9
tmp11 = 0.0
tmp12 = tmp7 <= tmp11
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf1, primals_4, primals_5, primals_6, buf2, buf7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_4
del primals_5
del primals_6
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = buf1
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_mul_relu_threshold_backward_2[grid
(256)](buf4, primals_8, primals_9, primals_10, primals_1, buf5,
buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_10
del primals_8
return buf5, primals_3, primals_7, primals_9, buf0, buf2, buf4, buf6, buf7
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, pad the convolutions to maintain a constant size.
padding_mode(str): 'zero' or 'reflection'.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True,
padding_mode='zero', activation=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
assert padding_mode in ['zero', 'reflection'], 'Invalid padding mode'
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
conv_pad = padding
if padding_mode == 'reflection' and padding > 0:
self.add_module('reflection_pad', nn.ReflectionPad2d(padding))
conv_pad = 0
else:
self.add_module('no-op', nn.Identity())
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=conv_pad, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FixupBasicBlockNew(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, padding_mode='zero',
activation='relu'):
super(FixupBasicBlockNew, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None, padding_mode=
padding_mode)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
        self.ksize = ksize
self.pad = pad
def forward(self, input_0):
primals_2 = self.bias1a
primals_5 = self.bias1b
primals_6 = self.bias2a
primals_9 = self.scale
primals_10 = self.bias2b
primals_3 = self.conv1.conv.weight
primals_4 = self.conv1.conv.bias
primals_7 = self.conv2.conv.weight
primals_8 = self.conv2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
IlyaBizyaev/ttools
|
FixupBasicBlock
| false
| 8,358
|
[
"MIT"
] | 11
|
b1435b19f397ce1baff9daed3cb287e52a029fdb
|
https://github.com/IlyaBizyaev/ttools/tree/b1435b19f397ce1baff9daed3cb287e52a029fdb
|
LabelPredictor
|
import torch
from torch import nn
class LabelPredictor(nn.Module):
def __init__(self, nz_feat, classify_rot=True):
super(LabelPredictor, self).__init__()
self.pred_layer = nn.Linear(nz_feat, 1)
def forward(self, feat):
pred = self.pred_layer.forward(feat)
pred = torch.sigmoid(pred)
return pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nz_feat': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class LabelPredictorNew(nn.Module):
def __init__(self, nz_feat, classify_rot=True):
super(LabelPredictorNew, self).__init__()
self.pred_layer = nn.Linear(nz_feat, 1)
def forward(self, input_0):
primals_1 = self.pred_layer.weight
primals_2 = self.pred_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JasonQSY/Associative3D
|
LabelPredictor
| false
| 8,359
|
[
"MIT"
] | 25
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
https://github.com/JasonQSY/Associative3D/tree/c50818b593ec48c38ed7ee3e109c23531089da32
|
MultimodalHead
|
import torch
from torch import nn
class MultimodalHead(nn.Module):
"""
Multimodal head for the conv net outputs.
    This layer concatenates the outputs of the audio and visual convolutional
    nets and performs a fully-connected projection.
"""
def __init__(self, dim_in, num_classes, dropout_rate=0.0, act_func=
'softmax'):
"""
Args:
            dim_in (list of int): the channel dimensions of the visual/audio inputs.
num_classes (int): the channel dimension of the output.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
"""
super(MultimodalHead, self).__init__()
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
if act_func == 'softmax':
self.act = nn.Softmax(dim=-1)
elif act_func == 'sigmoid':
self.act = nn.Sigmoid()
else:
raise NotImplementedError(
                '{} is not supported as an activation function.'.format(
act_func))
def forward(self, x, y):
xy_cat = torch.cat((x, y), dim=-1)
if hasattr(self, 'dropout'):
xy_cat = self.dropout(xy_cat)
xy_cat = self.projection(xy_cat)
if not self.training:
xy_cat = self.act(xy_cat)
return xy_cat
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': [4, 4], 'num_classes': 4}]
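# --- Hedged usage sketch (editor addition, not part of the original repo) ---
# Mirrors the docstring: per-modality features are concatenated on the last
# dim, projected by one Linear layer, and softmax-ed only at inference time.
# The 128/64 feature sizes and batch of 4 are illustrative assumptions.
if __name__ == '__main__':
    head = MultimodalHead(dim_in=[128, 64], num_classes=10, dropout_rate=0.5)
    head.eval()                          # activation only applied when not training
    visual = torch.rand(4, 128)
    audio = torch.rand(4, 64)
    probs = head(visual, audio)          # shape (4, 10); rows sum to ~1
    print(probs.shape, probs.sum(dim=-1))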
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
return buf3, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf3
class MultimodalHeadNew(nn.Module):
"""
Multimodal head for the conv net outputs.
    This layer concatenates the outputs of the audio and visual convolutional
    nets and performs a fully-connected projection.
"""
def __init__(self, dim_in, num_classes, dropout_rate=0.0, act_func=
'softmax'):
"""
Args:
            dim_in (list of int): the channel dimensions of the visual/audio inputs.
num_classes (int): the channel dimension of the output.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
"""
super(MultimodalHeadNew, self).__init__()
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
if act_func == 'softmax':
self.act = nn.Softmax(dim=-1)
elif act_func == 'sigmoid':
self.act = nn.Sigmoid()
else:
raise NotImplementedError(
                '{} is not supported as an activation function.'.format(
act_func))
def forward(self, input_0, input_1):
primals_3 = self.projection.weight
primals_4 = self.projection.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
JiwanChung/acav100m
|
MultimodalHead
| false
| 8,360
|
[
"MIT"
] | 27
|
51cb948d5682da69334a8d05d2df631971b60215
|
https://github.com/JiwanChung/acav100m/tree/51cb948d5682da69334a8d05d2df631971b60215
|
CNN_small
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.utils.data
class CNN_small(nn.Module):
def __init__(self, num_classes=10):
super(CNN_small, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
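# --- Hedged usage sketch (editor addition, not part of the original repo) ---
# The network follows a LeNet-style CIFAR pipeline: two conv+pool stages down
# to 16x5x5 features, then three fully connected layers to the class logits.
if __name__ == '__main__':
    net = CNN_small(num_classes=10)
    images = torch.rand(4, 3, 32, 32)
    logits = net(images)
    print(logits.shape)  # torch.Size([4, 10])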
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class CNN_smallNew(nn.Module):
def __init__(self, num_classes=10):
super(CNN_smallNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, num_classes)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
JiarunLiu/Co-correcting
|
CNN_small
| false
| 8,361
|
[
"Apache-2.0"
] | 19
|
4e3ca4951de5d73ca812bbbcfe666273082ff2fd
|
https://github.com/JiarunLiu/Co-correcting/tree/4e3ca4951de5d73ca812bbbcfe666273082ff2fd
|
CRFLoss
|
import torch
import torch.nn as nn
class CRFLoss(nn.Module):
def __init__(self, L, init):
super(CRFLoss, self).__init__()
self.start = nn.Parameter(torch.Tensor(L).uniform_(-init, init))
self.T = nn.Parameter(torch.Tensor(L, L).uniform_(-init, init))
self.end = nn.Parameter(torch.Tensor(L).uniform_(-init, init))
def forward(self, scores, targets):
normalizers = self.compute_normalizers(scores)
target_scores = self.score_targets(scores, targets)
loss = (normalizers - target_scores).mean()
return loss
def decode(self, scores):
_B, T, _L = scores.size()
prev = self.start.unsqueeze(0) + scores[:, 0]
back = []
for i in range(1, T):
cur = prev.unsqueeze(2) + scores.transpose(0, 1)[i].unsqueeze(1
) + self.T.transpose(0, 1)
prev, indices = cur.max(dim=1)
back.append(indices)
prev += self.end
max_scores, indices = prev.max(dim=1)
tape = [indices]
back = list(reversed(back))
for i in range(T - 1):
indices = torch.gather(back[i], 1, indices.unsqueeze(1)).squeeze(1)
tape.append(indices)
return max_scores, torch.stack(tape[::-1], dim=1)
def compute_normalizers(self, scores):
_B, T, _L = scores.size()
prev = self.start + scores.transpose(0, 1)[0]
for i in range(1, T):
cur = prev.unsqueeze(2) + scores.transpose(0, 1)[i].unsqueeze(1
) + self.T.transpose(0, 1)
prev = torch.logsumexp(cur, dim=1).clone()
prev += self.end
normalizers = torch.logsumexp(prev, 1)
return normalizers
def score_targets(self, scores, targets):
_B, T, _L = scores.size()
emits = scores.gather(2, targets.unsqueeze(2)).squeeze(2).sum(1)
trans = torch.stack([self.start.gather(0, targets[:, 0])] + [self.T
[targets[:, i], targets[:, i - 1]] for i in range(1, T)] + [
self.end.gather(0, targets[:, -1])]).sum(0)
return emits + trans
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64)]
def get_init_inputs():
return [[], {'L': 4, 'init': 4}]
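# --- Hedged usage sketch (editor addition, not part of the original repo) ---
# Uses float emission scores of shape (B, T, L) and integer tag targets of
# shape (B, T): forward returns the mean negative log-likelihood, and decode
# returns the best (Viterbi) scores and label paths. Sizes are illustrative.
if __name__ == '__main__':
    crf = CRFLoss(L=5, init=0.1)
    scores = torch.randn(2, 6, 5)              # (batch, time, labels)
    targets = torch.randint(0, 5, (2, 6))      # gold label indices
    loss = crf(scores, targets)
    best_scores, best_paths = crf.decode(scores)
    print(loss.item(), best_paths.shape)       # best_paths: torch.Size([2, 6])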
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_logsumexp_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 16 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr1 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr0 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp30 = tl.load(in_ptr1 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp1 + tmp3
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp4 + tmp6
tmp9 = tmp7 + tmp8
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 + tmp13
tmp15 = tmp14 + tmp6
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp9, tmp17)
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp20 + tmp22
tmp24 = tmp23 + tmp6
tmp26 = tmp24 + tmp25
tmp27 = triton_helpers.maximum(tmp18, tmp26)
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 + tmp6
tmp35 = tmp33 + tmp34
tmp36 = triton_helpers.maximum(tmp27, tmp35)
tmp37 = tl_math.abs(tmp36)
tmp38 = float('inf')
tmp39 = tmp37 == tmp38
tmp40 = 0.0
tmp41 = tl.where(tmp39, tmp40, tmp36)
tmp42 = tmp9 - tmp41
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp17 - tmp41
tmp45 = tl_math.exp(tmp44)
tmp46 = tmp43 + tmp45
tmp47 = tmp26 - tmp41
tmp48 = tl_math.exp(tmp47)
tmp49 = tmp46 + tmp48
tmp50 = tmp35 - tmp41
tmp51 = tl_math.exp(tmp50)
tmp52 = tmp49 + tmp51
tl.store(out_ptr0 + x2, tmp36, xmask)
tl.store(out_ptr1 + x2, tmp52, xmask)
@triton.jit
def triton_poi_fused_add_logsumexp_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp33 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp38 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp44 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = float('inf')
tmp5 = tmp3 == tmp4
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp6, tmp2)
tmp8 = tmp1 + tmp7
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tl_math.log(tmp14)
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp17 == tmp4
tmp19 = tl.where(tmp18, tmp6, tmp16)
tmp20 = tmp15 + tmp19
tmp21 = tmp20 + tmp10
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp26 = tl_math.log(tmp25)
tmp28 = tl_math.abs(tmp27)
tmp29 = tmp28 == tmp4
tmp30 = tl.where(tmp29, tmp6, tmp27)
tmp31 = tmp26 + tmp30
tmp32 = tmp31 + tmp10
tmp34 = tmp32 + tmp33
tmp35 = triton_helpers.maximum(tmp24, tmp34)
tmp37 = tl_math.log(tmp36)
tmp39 = tl_math.abs(tmp38)
tmp40 = tmp39 == tmp4
tmp41 = tl.where(tmp40, tmp6, tmp38)
tmp42 = tmp37 + tmp41
tmp43 = tmp42 + tmp10
tmp45 = tmp43 + tmp44
tmp46 = triton_helpers.maximum(tmp35, tmp45)
tmp47 = tl_math.abs(tmp46)
tmp48 = tmp47 == tmp4
tmp49 = tl.where(tmp48, tmp6, tmp46)
tmp50 = tmp13 - tmp49
tmp51 = tl_math.exp(tmp50)
tmp52 = tmp23 - tmp49
tmp53 = tl_math.exp(tmp52)
tmp54 = tmp51 + tmp53
tmp55 = tmp34 - tmp49
tmp56 = tl_math.exp(tmp55)
tmp57 = tmp54 + tmp56
tmp58 = tmp45 - tmp49
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tl.store(out_ptr0 + x2, tmp46, xmask)
tl.store(out_ptr1 + x2, tmp60, xmask)
@triton.jit
def triton_poi_fused_add_logsumexp_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp33 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp38 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp44 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = float('inf')
tmp5 = tmp3 == tmp4
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp6, tmp2)
tmp8 = tmp1 + tmp7
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tl_math.log(tmp14)
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp17 == tmp4
tmp19 = tl.where(tmp18, tmp6, tmp16)
tmp20 = tmp15 + tmp19
tmp21 = tmp20 + tmp10
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp26 = tl_math.log(tmp25)
tmp28 = tl_math.abs(tmp27)
tmp29 = tmp28 == tmp4
tmp30 = tl.where(tmp29, tmp6, tmp27)
tmp31 = tmp26 + tmp30
tmp32 = tmp31 + tmp10
tmp34 = tmp32 + tmp33
tmp35 = triton_helpers.maximum(tmp24, tmp34)
tmp37 = tl_math.log(tmp36)
tmp39 = tl_math.abs(tmp38)
tmp40 = tmp39 == tmp4
tmp41 = tl.where(tmp40, tmp6, tmp38)
tmp42 = tmp37 + tmp41
tmp43 = tmp42 + tmp10
tmp45 = tmp43 + tmp44
tmp46 = triton_helpers.maximum(tmp35, tmp45)
tmp47 = tl_math.abs(tmp46)
tmp48 = tmp47 == tmp4
tmp49 = tl.where(tmp48, tmp6, tmp46)
tmp50 = tmp13 - tmp49
tmp51 = tl_math.exp(tmp50)
tmp52 = tmp23 - tmp49
tmp53 = tl_math.exp(tmp52)
tmp54 = tmp51 + tmp53
tmp55 = tmp34 - tmp49
tmp56 = tl_math.exp(tmp55)
tmp57 = tmp54 + tmp56
tmp58 = tmp45 - tmp49
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tl.store(out_ptr0 + x2, tmp46, xmask)
tl.store(out_ptr1 + x2, tmp60, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tl.broadcast_to(tmp9, [XBLOCK])) & (tl.
broadcast_to(tmp9, [XBLOCK]) < 4) | ~(tmp4 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp9, [XBLOCK]) < 4')
tmp11 = tl.load(in_ptr1 + tl.broadcast_to(tmp9, [XBLOCK]), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (1 + 4 * (-4 + x0)), tmp15 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp6
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tl.device_assert((0 <= tl.broadcast_to(tmp19, [XBLOCK])) & (tl.
broadcast_to(tmp19, [XBLOCK]) < 4) | ~(tmp15 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp19, [XBLOCK]) < 4')
tmp21 = tl.load(in_ptr0 + 4 * (-4 + x0), tmp15 & xmask, eviction_policy
='evict_last', other=0.0)
tmp22 = tmp21 + tmp6
tmp23 = tmp21 < 0
tmp24 = tl.where(tmp23, tmp22, tmp21)
tl.device_assert((0 <= tl.broadcast_to(tmp24, [XBLOCK])) & (tl.
broadcast_to(tmp24, [XBLOCK]) < 4) | ~(tmp15 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp24, [XBLOCK]) < 4')
tmp26 = tl.load(in_ptr2 + tl.broadcast_to(tmp24 + 4 * tmp19, [XBLOCK]),
tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp0 >= tmp13
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr0 + (2 + 4 * (-8 + x0)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp6
tmp33 = tmp31 < 0
tmp34 = tl.where(tmp33, tmp32, tmp31)
tl.device_assert((0 <= tl.broadcast_to(tmp34, [XBLOCK])) & (tl.
broadcast_to(tmp34, [XBLOCK]) < 4) | ~(tmp30 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp34, [XBLOCK]) < 4')
tmp36 = tl.load(in_ptr0 + (1 + 4 * (-8 + x0)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp36 + tmp6
tmp38 = tmp36 < 0
tmp39 = tl.where(tmp38, tmp37, tmp36)
tl.device_assert((0 <= tl.broadcast_to(tmp39, [XBLOCK])) & (tl.
broadcast_to(tmp39, [XBLOCK]) < 4) | ~(tmp30 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp39, [XBLOCK]) < 4')
tmp41 = tl.load(in_ptr2 + tl.broadcast_to(tmp39 + 4 * tmp34, [XBLOCK]),
tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tmp0 >= tmp28
tmp43 = tl.full([1], 16, tl.int64)
tmp44 = tmp0 < tmp43
tmp45 = tmp42 & tmp44
tmp46 = tl.load(in_ptr0 + (3 + 4 * (-12 + x0)), tmp45 & xmask,
eviction_policy='evict_last', other=0.0)
tmp47 = tmp46 + tmp6
tmp48 = tmp46 < 0
tmp49 = tl.where(tmp48, tmp47, tmp46)
tl.device_assert((0 <= tl.broadcast_to(tmp49, [XBLOCK])) & (tl.
broadcast_to(tmp49, [XBLOCK]) < 4) | ~(tmp45 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp49, [XBLOCK]) < 4')
tmp51 = tl.load(in_ptr0 + (2 + 4 * (-12 + x0)), tmp45 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp51 + tmp6
tmp53 = tmp51 < 0
tmp54 = tl.where(tmp53, tmp52, tmp51)
tl.device_assert((0 <= tl.broadcast_to(tmp54, [XBLOCK])) & (tl.
broadcast_to(tmp54, [XBLOCK]) < 4) | ~(tmp45 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp54, [XBLOCK]) < 4')
tmp56 = tl.load(in_ptr2 + tl.broadcast_to(tmp54 + 4 * tmp49, [XBLOCK]),
tmp45 & xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tmp0 >= tmp43
tl.full([1], 20, tl.int64)
tmp60 = tl.load(in_ptr0 + (3 + 4 * (-16 + x0)), tmp57 & xmask,
eviction_policy='evict_last', other=0.0)
tmp61 = tmp60 + tmp6
tmp62 = tmp60 < 0
tmp63 = tl.where(tmp62, tmp61, tmp60)
tl.device_assert((0 <= tl.broadcast_to(tmp63, [XBLOCK])) & (tl.
broadcast_to(tmp63, [XBLOCK]) < 4) | ~(tmp57 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp63, [XBLOCK]) < 4')
tmp65 = tl.load(in_ptr3 + tl.broadcast_to(tmp63, [XBLOCK]), tmp57 &
xmask, eviction_policy='evict_last', other=0.0)
tmp66 = tl.where(tmp45, tmp56, tmp65)
tmp67 = tl.where(tmp30, tmp41, tmp66)
tmp68 = tl.where(tmp15, tmp26, tmp67)
tmp69 = tl.where(tmp4, tmp11, tmp68)
tl.store(out_ptr0 + x0, tmp69, xmask)
@triton.jit
def triton_per_fused_add_logsumexp_mean_sub_sum_4(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, rnumel, XBLOCK: tl
.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + r0, None)
tmp30 = tl.load(in_ptr2 + (4 + r0), None)
tmp32 = tl.load(in_ptr2 + (8 + r0), None)
tmp34 = tl.load(in_ptr2 + (12 + r0), None)
tmp36 = tl.load(in_ptr2 + (16 + r0), None)
tmp39 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr4 + 4 * r0, None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr5 + 0)
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr4 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr5 + 1)
tmp59 = tl.broadcast_to(tmp58, [XBLOCK, RBLOCK])
tmp62 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr4 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp69 = tl.load(in_ptr5 + 2)
tmp70 = tl.broadcast_to(tmp69, [XBLOCK, RBLOCK])
tmp73 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp75 = tl.load(in_ptr4 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp80 = tl.load(in_ptr5 + 3)
tmp81 = tl.broadcast_to(tmp80, [XBLOCK, RBLOCK])
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 16 * r0), None, eviction_policy=
'evict_last')
tmp8 = tmp7 + tmp1
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert((0 <= tmp10) & (tmp10 < 4),
'index out of bounds: 0 <= tmp10 < 4')
tmp12 = tl.load(in_ptr1 + (4 + tmp10 + 16 * r0), None, eviction_policy=
'evict_last')
tmp13 = tmp6 + tmp12
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tl.device_assert((0 <= tmp17) & (tmp17 < 4),
'index out of bounds: 0 <= tmp17 < 4')
tmp19 = tl.load(in_ptr1 + (8 + tmp17 + 16 * r0), None, eviction_policy=
'evict_last')
tmp20 = tmp13 + tmp19
tmp22 = tmp21 + tmp1
tmp23 = tmp21 < 0
tmp24 = tl.where(tmp23, tmp22, tmp21)
tl.device_assert((0 <= tmp24) & (tmp24 < 4),
'index out of bounds: 0 <= tmp24 < 4')
tmp26 = tl.load(in_ptr1 + (12 + tmp24 + 16 * r0), None, eviction_policy
='evict_last')
tmp27 = tmp20 + tmp26
tmp28 = tmp27.to(tl.float32)
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp35 = tmp33 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = tmp28 + tmp37
tmp40 = tl_math.log(tmp39)
tmp42 = tl_math.abs(tmp41)
tmp43 = float('inf')
tmp44 = tmp42 == tmp43
tmp45 = 0.0
tmp46 = tl.where(tmp44, tmp45, tmp41)
tmp47 = tmp40 + tmp46
tmp50 = tmp47 + tmp49
tmp52 = tl_math.log(tmp51)
tmp54 = tl_math.abs(tmp53)
tmp55 = tmp54 == tmp43
tmp56 = tl.where(tmp55, tmp45, tmp53)
tmp57 = tmp52 + tmp56
tmp60 = tmp57 + tmp59
tmp61 = triton_helpers.maximum(tmp50, tmp60)
tmp63 = tl_math.log(tmp62)
tmp65 = tl_math.abs(tmp64)
tmp66 = tmp65 == tmp43
tmp67 = tl.where(tmp66, tmp45, tmp64)
tmp68 = tmp63 + tmp67
tmp71 = tmp68 + tmp70
tmp72 = triton_helpers.maximum(tmp61, tmp71)
tmp74 = tl_math.log(tmp73)
tmp76 = tl_math.abs(tmp75)
tmp77 = tmp76 == tmp43
tmp78 = tl.where(tmp77, tmp45, tmp75)
tmp79 = tmp74 + tmp78
tmp82 = tmp79 + tmp81
tmp83 = triton_helpers.maximum(tmp72, tmp82)
tmp84 = tl_math.abs(tmp83)
tmp85 = tmp84 == tmp43
tmp86 = tl.where(tmp85, tmp45, tmp83)
tmp87 = tmp50 - tmp86
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp60 - tmp86
tmp90 = tl_math.exp(tmp89)
tmp91 = tmp88 + tmp90
tmp92 = tmp71 - tmp86
tmp93 = tl_math.exp(tmp92)
tmp94 = tmp91 + tmp93
tmp95 = tmp82 - tmp86
tmp96 = tl_math.exp(tmp95)
tmp97 = tmp94 + tmp96
tmp98 = tl_math.log(tmp97)
tmp99 = tmp98 + tmp86
tmp100 = tmp99 - tmp38
tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK])
tmp103 = tl.sum(tmp101, 1)[:, None]
tmp104 = 4.0
tmp105 = tmp103 / tmp104
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp105, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_logsumexp_0[grid(16)](primals_2, primals_1,
primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_logsumexp_1[grid(16)](buf1, buf0, primals_1,
primals_3, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf1, (4, 1, 4), (4, 16, 1), 0)
del buf1
buf5 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
triton_poi_fused_add_logsumexp_2[grid(16)](buf3, buf2, primals_1,
primals_3, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del buf3
buf8 = empty_strided_cuda((20,), (1,), torch.float32)
triton_poi_fused_stack_3[grid(20)](primals_5, primals_2, primals_3,
primals_4, buf8, 20, XBLOCK=32, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((), (), torch.float32)
buf11 = buf10
del buf10
triton_per_fused_add_logsumexp_mean_sub_sum_4[grid(1)](buf11,
primals_5, primals_1, buf8, buf5, buf4, primals_4, 1, 4, XBLOCK
=1, num_warps=2, num_stages=1)
del buf4
del buf5
del buf8
return (buf11, primals_1, primals_2, primals_3, primals_4,
reinterpret_tensor(primals_5, (4,), (4,), 0), reinterpret_tensor(
primals_5, (4,), (4,), 1), reinterpret_tensor(primals_5, (4,), (4,),
2), reinterpret_tensor(primals_5, (4,), (4,), 3),
reinterpret_tensor(primals_5, (4,), (4,), 3))
class CRFLossNew(nn.Module):
def __init__(self, L, init):
super(CRFLossNew, self).__init__()
self.start = nn.Parameter(torch.Tensor(L).uniform_(-init, init))
self.T = nn.Parameter(torch.Tensor(L, L).uniform_(-init, init))
self.end = nn.Parameter(torch.Tensor(L).uniform_(-init, init))
def decode(self, scores):
_B, T, _L = scores.size()
prev = self.start.unsqueeze(0) + scores[:, 0]
back = []
for i in range(1, T):
cur = prev.unsqueeze(2) + scores.transpose(0, 1)[i].unsqueeze(1
) + self.T.transpose(0, 1)
prev, indices = cur.max(dim=1)
back.append(indices)
prev += self.end
max_scores, indices = prev.max(dim=1)
tape = [indices]
back = list(reversed(back))
for i in range(T - 1):
indices = torch.gather(back[i], 1, indices.unsqueeze(1)).squeeze(1)
tape.append(indices)
return max_scores, torch.stack(tape[::-1], dim=1)
def compute_normalizers(self, scores):
_B, T, _L = scores.size()
prev = self.start + scores.transpose(0, 1)[0]
for i in range(1, T):
cur = prev.unsqueeze(2) + scores.transpose(0, 1)[i].unsqueeze(1
) + self.T.transpose(0, 1)
prev = torch.logsumexp(cur, dim=1).clone()
prev += self.end
normalizers = torch.logsumexp(prev, 1)
return normalizers
def score_targets(self, scores, targets):
_B, T, _L = scores.size()
emits = scores.gather(2, targets.unsqueeze(2)).squeeze(2).sum(1)
trans = torch.stack([self.start.gather(0, targets[:, 0])] + [self.T
[targets[:, i], targets[:, i - 1]] for i in range(1, T)] + [
self.end.gather(0, targets[:, -1])]).sum(0)
return emits + trans
def forward(self, input_0, input_1):
primals_2 = self.start
primals_3 = self.T
primals_4 = self.end
primals_1 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
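# Hedged usage sketch (added for illustration; not part of the original repo).
# It assumes a CUDA device, emission scores of shape [batch, seq_len, L] and
# integer label targets of shape [batch, seq_len]; all names are illustrative.
def _example_crf_usage():
    crf = CRFLossNew(L=4, init=0.1).cuda()
    scores = torch.rand(4, 4, 4, device='cuda')
    targets = torch.randint(0, 4, (4, 4), device='cuda')
    loss = crf(scores, targets)                   # mean negative log-likelihood
    best_scores, best_paths = crf.decode(scores)  # Viterbi decoding
    return loss, best_scores, best_paths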
|
Johannes0Horn/mtl-dts
|
CRFLoss
| false
| 8,362
|
[
"MIT"
] | 19
|
ae50253c808bbb77af3b1117f69f08d2268099e9
|
https://github.com/Johannes0Horn/mtl-dts/tree/ae50253c808bbb77af3b1117f69f08d2268099e9
|
NonLocalBlock
|
import torch
import torch.nn as nn
from time import *
class NonLocalBlock(nn.Module):
def __init__(self, channel):
super(NonLocalBlock, self).__init__()
self.inter_channel = channel // 2
self.conv_phi = nn.Conv2d(in_channels=channel, out_channels=self.
inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.conv_theta = nn.Conv2d(in_channels=channel, out_channels=self.
inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.conv_g = nn.Conv2d(in_channels=channel, out_channels=self.
inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.softmax = nn.Softmax(dim=1)
self.conv_mask = nn.Conv2d(in_channels=self.inter_channel,
out_channels=channel, kernel_size=1, stride=1, padding=0, bias=
False)
def forward(self, x):
b, c, h, w = x.size()
x_phi = self.conv_phi(x).view(b, c, -1)
x_theta = self.conv_theta(x).view(b, c, -1).permute(0, 2, 1
).contiguous()
x_g = self.conv_g(x).view(b, c, -1).permute(0, 2, 1).contiguous()
mul_theta_phi = torch.matmul(x_theta, x_phi)
mul_theta_phi = self.softmax(mul_theta_phi)
mul_theta_phi_g = torch.matmul(mul_theta_phi, x_g)
mul_theta_phi_g = mul_theta_phi_g.permute(0, 2, 1).contiguous().view(b,
self.inter_channel, h, w)
mask = self.conv_mask(mul_theta_phi_g)
out = mask + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
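# Hedged usage sketch (added for illustration; not part of the original repo):
# the residual connection keeps the output shape equal to the input shape.
def _example_nonlocal_usage():
    block = NonLocalBlock(channel=4)
    x = torch.rand(4, 4, 4, 4)
    return block(x).shape  # torch.Size([4, 4, 4, 4])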
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from time import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_out_ptr0, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_out_ptr0 + (x1 + 8 * y0), xmask & ymask,
eviction_policy='evict_last')
tl.debug_barrier()
tl.store(in_out_ptr0 + (x1 + 8 * y0), tmp0, xmask & ymask)
tl.store(out_ptr0 + (y2 + 4 * x1 + 32 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 32
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 8
x1 = xindex // 8
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + (x0 + 8 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused_clone_view_3(in_out_ptr0, in_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 8 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, 2, 1, 1), (2, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 4, 4), (32, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1))
buf3 = reinterpret_tensor(buf1, (4, 8, 4), (32, 1, 8), 0)
del buf1
buf15 = empty_strided_cuda((4, 4, 8), (32, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 8)](buf3, buf15, 16, 8,
XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf0, (4, 4, 8), (32, 8,
1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 1, 8), (8, 32, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 8), (8, 32, 1), torch.float32)
triton_per_fused__softmax_1[grid(32)](buf4, buf5, buf6, 32, 8,
XBLOCK=1, num_warps=2, num_stages=1)
buf7 = buf4
del buf4
triton_poi_fused__softmax_2[grid(256)](buf7, buf5, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf2, (4, 8, 4), (32, 1, 8), 0)
del buf2
buf14 = reinterpret_tensor(buf3, (4, 4, 8), (32, 1, 4), 0)
del buf3
triton_poi_fused_clone_transpose_0[grid(16, 8)](buf8, buf14, 16, 8,
XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
extern_kernels.bmm(buf7, buf8, out=buf9)
buf10 = reinterpret_tensor(buf8, (4, 4, 8), (32, 8, 1), 0)
del buf8
buf11 = reinterpret_tensor(buf10, (4, 2, 4, 4), (32, 16, 4, 1), 0)
del buf10
triton_poi_fused_clone_view_3[grid(16, 8)](buf11, buf9, 16, 8,
XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
del buf9
buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = buf12
del buf12
triton_poi_fused_add_4[grid(256)](buf13, primals_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return (buf13, primals_1, primals_2, primals_3, primals_4, primals_5,
buf7, buf11, buf14, buf15, reinterpret_tensor(buf0, (4, 8, 4), (32,
1, 8), 0))
class NonLocalBlockNew(nn.Module):
def __init__(self, channel):
super(NonLocalBlockNew, self).__init__()
self.inter_channel = channel // 2
self.conv_phi = nn.Conv2d(in_channels=channel, out_channels=self.
inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.conv_theta = nn.Conv2d(in_channels=channel, out_channels=self.
inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.conv_g = nn.Conv2d(in_channels=channel, out_channels=self.
inter_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.softmax = nn.Softmax(dim=1)
self.conv_mask = nn.Conv2d(in_channels=self.inter_channel,
out_channels=channel, kernel_size=1, stride=1, padding=0, bias=
False)
def forward(self, input_0):
primals_2 = self.conv_phi.weight
primals_3 = self.conv_theta.weight
primals_4 = self.conv_g.weight
primals_5 = self.conv_mask.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Jinming-Su/SGNet
|
NonLocalBlock
| false
| 8,363
|
[
"MIT"
] | 13
|
fcf35edaf332c1a4e2713acad5a0fc0e21509c3e
|
https://github.com/Jinming-Su/SGNet/tree/fcf35edaf332c1a4e2713acad5a0fc0e21509c3e
|
SoftCrossEntropyLoss
|
import torch
def soft_cross_entropy(logit, label, weight=None, reduce=None, reduction='mean'
):
if weight is not None and weight.requires_grad:
raise RuntimeError('gradient for weight is not supported')
losses = SoftCrossEntropyFunction.apply(logit, label, weight)
reduction = {(True): 'mean', (False): 'none', None: reduction}[reduce]
if reduction == 'mean':
return losses.mean()
elif reduction == 'sum':
return losses.sum()
elif reduction == 'none':
return losses
else:
raise ValueError('invalid value for reduction: {}'.format(reduction))
class SoftCrossEntropyFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, logit, label, weight=None):
assert logit.size() == label.size(), 'logit.size() != label.size()'
dim = logit.dim()
max_logit = logit.max(dim - 1, keepdim=True)[0]
logit = logit - max_logit
exp_logit = logit.exp()
exp_sum = exp_logit.sum(dim - 1, keepdim=True)
prob = exp_logit / exp_sum
log_exp_sum = exp_sum.log()
neg_log_prob = log_exp_sum - logit
if weight is None:
weighted_label = label
else:
if weight.size() != (logit.size(-1),):
raise ValueError(
'since logit.size() = {}, weight.size() should be ({},), but got {}'
.format(logit.size(), logit.size(-1), weight.size()))
size = [1] * label.dim()
size[-1] = label.size(-1)
weighted_label = label * weight.view(size)
ctx.save_for_backward(weighted_label, prob)
out = (neg_log_prob * weighted_label).sum(dim - 1)
return out
@staticmethod
def backward(ctx, grad_output):
weighted_label, prob = ctx.saved_tensors
old_size = weighted_label.size()
K = old_size[-1]
B = weighted_label.numel() // K
grad_output = grad_output.view(B, 1)
weighted_label = weighted_label.view(B, K)
prob = prob.view(B, K)
grad_input = grad_output * (prob * weighted_label.sum(1, True) -
weighted_label)
grad_input = grad_input.view(old_size)
return grad_input, None, None
class SoftCrossEntropyLoss(torch.nn.Module):
def __init__(self, weight=None, reduce=None, reduction='mean'):
super(SoftCrossEntropyLoss, self).__init__()
self.weight = weight
self.reduce = reduce
self.reduction = reduction
def forward(self, logit, label, weight=None):
if weight is None:
weight = self.weight
return soft_cross_entropy(logit, label, weight, self.reduce, self.
reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
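# Hedged usage sketch (added for illustration; not part of the original repo).
# The label tensor holds soft, per-class target weights (e.g. a probability
# distribution over the last dimension); all names are illustrative.
def _example_soft_ce_usage():
    criterion = SoftCrossEntropyLoss(reduction='mean')
    logit = torch.rand(4, 4, 4, 4)
    label = torch.softmax(torch.rand(4, 4, 4, 4), dim=-1)  # soft targets
    return criterion(logit, label)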
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_exp_log_mean_mul_sub_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp11 - tmp0
tmp14 = tmp12 * tmp13
tmp15 = tmp11 - tmp2
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp11 - tmp5
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp11 - tmp8
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp30 = 64.0
tmp31 = tmp29 / tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_exp_log_mean_mul_sub_sum_1[grid(1)](buf2, buf0,
arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
def soft_cross_entropy(logit, label, weight=None, reduce=None, reduction='mean'
):
if weight is not None and weight.requires_grad:
raise RuntimeError('gradient for weight is not supported')
losses = SoftCrossEntropyFunction.apply(logit, label, weight)
reduction = {(True): 'mean', (False): 'none', None: reduction}[reduce]
if reduction == 'mean':
return losses.mean()
elif reduction == 'sum':
return losses.sum()
elif reduction == 'none':
return losses
else:
raise ValueError('invalid value for reduction: {}'.format(reduction))
class SoftCrossEntropyFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, logit, label, weight=None):
assert logit.size() == label.size(), 'logit.size() != label.size()'
dim = logit.dim()
max_logit = logit.max(dim - 1, keepdim=True)[0]
logit = logit - max_logit
exp_logit = logit.exp()
exp_sum = exp_logit.sum(dim - 1, keepdim=True)
prob = exp_logit / exp_sum
log_exp_sum = exp_sum.log()
neg_log_prob = log_exp_sum - logit
if weight is None:
weighted_label = label
else:
if weight.size() != (logit.size(-1),):
raise ValueError(
'since logit.size() = {}, weight.size() should be ({},), but got {}'
.format(logit.size(), logit.size(-1), weight.size()))
size = [1] * label.dim()
size[-1] = label.size(-1)
weighted_label = label * weight.view(size)
ctx.save_for_backward(weighted_label, prob)
out = (neg_log_prob * weighted_label).sum(dim - 1)
return out
@staticmethod
def backward(ctx, grad_output):
weighted_label, prob = ctx.saved_tensors
old_size = weighted_label.size()
K = old_size[-1]
B = weighted_label.numel() // K
grad_output = grad_output.view(B, 1)
weighted_label = weighted_label.view(B, K)
prob = prob.view(B, K)
grad_input = grad_output * (prob * weighted_label.sum(1, True) -
weighted_label)
grad_input = grad_input.view(old_size)
return grad_input, None, None
class SoftCrossEntropyLossNew(torch.nn.Module):
def __init__(self, weight=None, reduce=None, reduction='mean'):
super(SoftCrossEntropyLossNew, self).__init__()
self.weight = weight
self.reduce = reduce
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Jingkang50/ICCV21_SCOOD
|
SoftCrossEntropyLoss
| false
| 8,364
|
[
"MIT"
] | 34
|
51204e3788a9e81aa334611072bef106fd9d13ad
|
https://github.com/Jingkang50/ICCV21_SCOOD/tree/51204e3788a9e81aa334611072bef106fd9d13ad
|
MaxPool2dSamePadding
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_same_padding(in_size, kernel_size, stride):
"""'Same 'same' operation with tensorflow
notice:padding=(0, 1, 0, 1) and padding=(1, 1, 1, 1) are different
padding=(1, 1, 1, 1):
out(H, W) = (in + [2 * padding] − kernel_size) // stride + 1
'same' padding=(0, 1, 0, 1):
out(H, W) = (in + [2 * padding] − kernel_size) / stride + 1
:param in_size: Union[int, tuple(in_h, in_w)]
:param kernel_size: Union[int, tuple(kernel_h, kernel_w)]
:param stride: Union[int, tuple(stride_h, stride_w)]
:return: padding: tuple(left, right, top, bottom)
"""
in_h, in_w = (in_size, in_size) if isinstance(in_size, int) else in_size
kernel_h, kernel_w = (kernel_size, kernel_size) if isinstance(kernel_size,
int) else kernel_size
stride_h, stride_w = (stride, stride) if isinstance(stride, int
) else stride
out_h, out_w = math.ceil(in_h / stride_h), math.ceil(in_w / stride_w)
pad_h = max((out_h - 1) * stride_h + kernel_h - in_h, 0)
pad_w = max((out_w - 1) * stride_w + kernel_w - in_w, 0)
return pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
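# Worked example (illustrative, not from the source): for in_size=(4, 4),
# kernel_size=4, stride=1 the output size is ceil(4 / 1) = 4, so the total
# padding is (4 - 1) * 1 + 4 - 4 = 3 per dimension, which is split
# asymmetrically as (left, right, top, bottom) = (1, 2, 1, 2).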
class MaxPool2dSamePadding(nn.MaxPool2d):
"""MaxPool2dDynamicSamePadding
    Since the input size is always a multiple of 128,
    the results of dynamic and static max pooling are identical.
    Dynamic pooling is used here instead of static pooling
    because it is simpler to implement."""
def __init__(self, kernel_size, stride):
self.kernel_size = kernel_size
self.stride = stride
super(MaxPool2dSamePadding, self).__init__(kernel_size, stride)
def forward(self, x):
padding = get_same_padding(x.shape[-2:], self.kernel_size, self.stride)
x = F.pad(x, padding)
x = super().forward(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4, 'stride': 1}]
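# Hedged usage sketch (added for illustration; not part of the original repo):
# with 'same' padding and stride 1 the spatial size is preserved.
def _example_same_pool_usage():
    pool = MaxPool2dSamePadding(kernel_size=4, stride=1)
    x = torch.rand(4, 4, 4, 4)
    return pool(x).shape  # torch.Size([4, 4, 4, 4])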
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp8 & tmp13
tmp16 = tmp15 & tmp14
tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp8 & tmp20
tmp23 = tmp22 & tmp21
tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=0.0)
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp8 & tmp27
tmp30 = tmp29 & tmp28
tmp31 = tl.load(in_ptr0 + (-2 + x4), tmp30 & xmask, other=0.0)
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp6
tmp38 = tmp37 & tmp7
tmp39 = tl.load(in_ptr0 + (-1 + x4), tmp38 & xmask, other=0.0)
tmp40 = triton_helpers.maximum(tmp39, tmp32)
tmp41 = tmp36 & tmp13
tmp42 = tmp41 & tmp14
tmp43 = tl.load(in_ptr0 + x4, tmp42 & xmask, other=0.0)
tmp44 = triton_helpers.maximum(tmp43, tmp40)
tmp45 = tmp36 & tmp20
tmp46 = tmp45 & tmp21
tmp47 = tl.load(in_ptr0 + (1 + x4), tmp46 & xmask, other=0.0)
tmp48 = triton_helpers.maximum(tmp47, tmp44)
tmp49 = tmp36 & tmp27
tmp50 = tmp49 & tmp28
tmp51 = tl.load(in_ptr0 + (2 + x4), tmp50 & xmask, other=0.0)
tmp52 = triton_helpers.maximum(tmp51, tmp48)
tmp53 = 1 + x1
tmp54 = tmp53 >= tmp1
tmp55 = tmp53 < tmp3
tmp56 = tmp54 & tmp55
tmp57 = tmp56 & tmp6
tmp58 = tmp57 & tmp7
tmp59 = tl.load(in_ptr0 + (3 + x4), tmp58 & xmask, other=0.0)
tmp60 = triton_helpers.maximum(tmp59, tmp52)
tmp61 = tmp56 & tmp13
tmp62 = tmp61 & tmp14
tmp63 = tl.load(in_ptr0 + (4 + x4), tmp62 & xmask, other=0.0)
tmp64 = triton_helpers.maximum(tmp63, tmp60)
tmp65 = tmp56 & tmp20
tmp66 = tmp65 & tmp21
tmp67 = tl.load(in_ptr0 + (5 + x4), tmp66 & xmask, other=0.0)
tmp68 = triton_helpers.maximum(tmp67, tmp64)
tmp69 = tmp56 & tmp27
tmp70 = tmp69 & tmp28
tmp71 = tl.load(in_ptr0 + (6 + x4), tmp70 & xmask, other=0.0)
tmp72 = triton_helpers.maximum(tmp71, tmp68)
tmp73 = 2 + x1
tmp74 = tmp73 >= tmp1
tmp75 = tmp73 < tmp3
tmp76 = tmp74 & tmp75
tmp77 = tmp76 & tmp6
tmp78 = tmp77 & tmp7
tmp79 = tl.load(in_ptr0 + (7 + x4), tmp78 & xmask, other=0.0)
tmp80 = triton_helpers.maximum(tmp79, tmp72)
tmp81 = tmp76 & tmp13
tmp82 = tmp81 & tmp14
tmp83 = tl.load(in_ptr0 + (8 + x4), tmp82 & xmask, other=0.0)
tmp84 = triton_helpers.maximum(tmp83, tmp80)
tmp85 = tmp76 & tmp20
tmp86 = tmp85 & tmp21
tmp87 = tl.load(in_ptr0 + (9 + x4), tmp86 & xmask, other=0.0)
tmp88 = triton_helpers.maximum(tmp87, tmp84)
tmp89 = tmp76 & tmp27
tmp90 = tmp89 & tmp28
tmp91 = tl.load(in_ptr0 + (10 + x4), tmp90 & xmask, other=0.0)
tmp92 = triton_helpers.maximum(tmp91, tmp88)
tl.store(out_ptr0 + x4, tmp92, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def get_same_padding(in_size, kernel_size, stride):
"""'Same 'same' operation with tensorflow
notice:padding=(0, 1, 0, 1) and padding=(1, 1, 1, 1) are different
padding=(1, 1, 1, 1):
out(H, W) = (in + [2 * padding] − kernel_size) // stride + 1
'same' padding=(0, 1, 0, 1):
out(H, W) = (in + [2 * padding] − kernel_size) / stride + 1
:param in_size: Union[int, tuple(in_h, in_w)]
:param kernel_size: Union[int, tuple(kernel_h, kernel_w)]
:param stride: Union[int, tuple(stride_h, stride_w)]
:return: padding: tuple(left, right, top, bottom)
"""
in_h, in_w = (in_size, in_size) if isinstance(in_size, int) else in_size
kernel_h, kernel_w = (kernel_size, kernel_size) if isinstance(kernel_size,
int) else kernel_size
stride_h, stride_w = (stride, stride) if isinstance(stride, int
) else stride
out_h, out_w = math.ceil(in_h / stride_h), math.ceil(in_w / stride_w)
pad_h = max((out_h - 1) * stride_h + kernel_h - in_h, 0)
pad_w = max((out_w - 1) * stride_w + kernel_w - in_w, 0)
return pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
class MaxPool2dSamePaddingNew(nn.MaxPool2d):
"""MaxPool2dDynamicSamePadding
    Since the input size is always a multiple of 128,
    the results of dynamic and static max pooling are identical.
    Dynamic pooling is used here instead of static pooling
    because it is simpler to implement."""
def __init__(self, kernel_size, stride):
self.kernel_size = kernel_size
self.stride = stride
super(MaxPool2dSamePaddingNew, self).__init__(kernel_size, stride)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Jintao-Huang/EfficientDet_PyTorch
|
MaxPool2dSamePadding
| false
| 8,365
|
[
"Apache-2.0"
] | 18
|
79616be397b7f57992cd43b772f65b58b5e25a8b
|
https://github.com/Jintao-Huang/EfficientDet_PyTorch/tree/79616be397b7f57992cd43b772f65b58b5e25a8b
|
SoftSelectPrototype
|
import torch
import torch.nn as nn
class SoftSelectAttention(nn.Module):
def __init__(self, hidden_size):
super(SoftSelectAttention, self).__init__()
def forward(self, support, query):
"""
:param support: [few, dim]
:param query: [batch, dim]
        :return: center: [batch, dim], attention-weighted combination of support
"""
query_ = query.unsqueeze(1).expand(query.size(0), support.size(0),
query.size(1)).contiguous()
support_ = support.unsqueeze(0).expand_as(query_).contiguous()
scalar = support.size(1) ** -0.5
score = torch.sum(query_ * support_, dim=2) * scalar
att = torch.softmax(score, dim=1)
center = torch.mm(att, support)
return center
class SoftSelectPrototype(nn.Module):
def __init__(self, r_dim):
super(SoftSelectPrototype, self).__init__()
self.Attention = SoftSelectAttention(hidden_size=r_dim)
def forward(self, support, query):
center = self.Attention(support, query)
return center
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'r_dim': 4}]
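# Hedged usage sketch (added for illustration; not part of the original repo):
# the prototype is an attention-weighted combination of the support vectors.
def _example_prototype_usage():
    proto = SoftSelectPrototype(r_dim=4)
    support = torch.rand(4, 4)    # [few, dim]
    query = torch.rand(4, 4)      # [batch, dim]
    return proto(support, query)  # [batch, dim]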
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 1.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = tl_math.exp(tmp10)
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_mul_sum_0[grid(16)](arg0_1, arg1_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_2[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.mm(buf2, arg1_1, out=buf3)
del arg1_1
del buf2
return buf3,
class SoftSelectAttention(nn.Module):
def __init__(self, hidden_size):
super(SoftSelectAttention, self).__init__()
def forward(self, support, query):
"""
:param support: [few, dim]
:param query: [batch, dim]
        :return: center: [batch, dim], attention-weighted combination of support
"""
query_ = query.unsqueeze(1).expand(query.size(0), support.size(0),
query.size(1)).contiguous()
support_ = support.unsqueeze(0).expand_as(query_).contiguous()
scalar = support.size(1) ** -0.5
score = torch.sum(query_ * support_, dim=2) * scalar
att = torch.softmax(score, dim=1)
center = torch.mm(att, support)
return center
class SoftSelectPrototypeNew(nn.Module):
def __init__(self, r_dim):
super(SoftSelectPrototypeNew, self).__init__()
self.Attention = SoftSelectAttention(hidden_size=r_dim)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
JiaweiSheng/FAAN
|
SoftSelectPrototype
| false
| 8,366
|
[
"MIT"
] | 41
|
b439b829506c4e2e9044a6b2ab7f3d844f445a95
|
https://github.com/JiaweiSheng/FAAN/tree/b439b829506c4e2e9044a6b2ab7f3d844f445a95
|
Critic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, n_obs, action_dim, hidden_size, init_w=0.003):
super(Critic, self).__init__()
self.linear1 = nn.Linear(n_obs + action_dim, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_obs': 4, 'action_dim': 4, 'hidden_size': 4}]
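# Hedged usage sketch (added for illustration; not part of the original repo):
# the critic maps a concatenated (state, action) pair to one Q-value per sample.
def _example_critic_usage():
    critic = Critic(n_obs=4, action_dim=4, hidden_size=4)
    state, action = torch.rand(4, 4), torch.rand(4, 4)
    return critic(state, action)  # shape [4, 1]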
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
class CriticNew(nn.Module):
def __init__(self, n_obs, action_dim, hidden_size, init_w=0.003):
super(CriticNew, self).__init__()
self.linear1 = nn.Linear(n_obs + action_dim, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_1 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.linear3.weight
primals_8 = self.linear3.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
JohnJim0816/rl-tutorials
|
Critic
| false
| 8,367
|
[
"MIT"
] | 16
|
e99daea815da85f9f25dff2d01b030249a203d22
|
https://github.com/JohnJim0816/rl-tutorials/tree/e99daea815da85f9f25dff2d01b030249a203d22
|
GlobalAveragePooling
|
import torch
import torch.nn as nn
class GlobalAveragePooling(nn.Module):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
"""
def __init__(self):
super().__init__()
self.gap = nn.AdaptiveAvgPool2d((1, 1))
def init_weights(self):
pass
def forward(self, inputs):
if isinstance(inputs, tuple):
outs = tuple([self.gap(x) for x in inputs])
outs = tuple([out.view(x.size(0), -1) for out, x in zip(outs,
inputs)])
elif isinstance(inputs, list):
outs = [self.gap(x) for x in inputs]
outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]
elif isinstance(inputs, torch.Tensor):
outs = self.gap(inputs)
outs = outs.view(inputs.size(0), -1)
else:
raise TypeError('neck inputs should be tuple or torch.tensor')
return outs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
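# Hedged sketch (added for illustration; not part of the original repo): it
# shows why `view` is preferred over `squeeze` when the batch size is 1.
def _example_gap_usage():
    gap = GlobalAveragePooling()
    x = torch.rand(1, 4, 4, 4)
    kept = gap(x).shape                                     # torch.Size([1, 4])
    lost = nn.AdaptiveAvgPool2d((1, 1))(x).squeeze().shape  # torch.Size([4])
    return kept, lost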
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
class GlobalAveragePoolingNew(nn.Module):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
"""
def __init__(self):
super().__init__()
self.gap = nn.AdaptiveAvgPool2d((1, 1))
def init_weights(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Jackqu/mmpose
|
GlobalAveragePooling
| false
| 8,368
|
[
"Apache-2.0"
] | 38
|
ad8acc5ff5da7993c6befdc4b1ced2c2ecb64533
|
https://github.com/Jackqu/mmpose/tree/ad8acc5ff5da7993c6befdc4b1ced2c2ecb64533
|
GlobalAttentionGeneral
|
import torch
import torch.nn as nn
import torch.nn.parallel
class GlobalAttentionGeneral(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneral, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context_key, content_value):
"""
        input: batch x idf x ih x iw (queryL = ih x iw)
context: batch x cdf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context_key.size(0), context_key.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context_key
attn = torch.bmm(targetT, sourceT)
attn = attn.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
attn.data.masked_fill_(mask.data, -float('inf'))
attn = self.sm(attn)
attn = attn.view(batch_size, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
weightedContext = torch.bmm(content_value, attn)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
attn = attn.view(batch_size, -1, ih, iw)
return weightedContext, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4,
4, 4])]
def get_init_inputs():
return [[], {'idf': 4, 'cdf': 4}]
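# Hedged usage sketch (added for illustration; not part of the original repo);
# tensor shapes follow the forward() docstring and all names are illustrative.
def _example_attention_usage():
    attn_layer = GlobalAttentionGeneral(idf=4, cdf=4)
    feat = torch.rand(4, 4, 4, 4)        # batch x idf x ih x iw
    context_key = torch.rand(4, 4, 4)    # batch x cdf x sourceL
    content_value = torch.rand(4, 4, 4)  # batch x cdf x sourceL
    weighted, attn = attn_layer(feat, context_key, content_value)
    return weighted.shape, attn.shape    # both torch.Size([4, 4, 4, 4])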
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf0, arg1_1, out=buf1)
del arg1_1
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(16, 16)](buf2, buf3, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0)
del buf2
extern_kernels.bmm(arg2_1, buf3, out=buf4)
del arg2_1
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
class GlobalAttentionGeneralNew(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneralNew, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
JoonHong-Kim/T2I_CL
|
GlobalAttentionGeneral
| false
| 8,369
|
[
"MIT"
] | 35
|
c52aa73da903d6e4174eeef2663e5bc1163785b1
|
https://github.com/JoonHong-Kim/T2I_CL/tree/c52aa73da903d6e4174eeef2663e5bc1163785b1
|
PolicyNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
class PolicyNet(nn.Module):
def __init__(self, state_dim, action_dim, hidden_dim, init_w=0.003,
log_std_min=-20, log_std_max=2):
super(PolicyNet, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(state_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, action_dim)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_dim, action_dim)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
return mean, log_std
def evaluate(self, state, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.sample()
action = torch.tanh(z)
log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
log_prob = log_prob.sum(-1, keepdim=True)
return action, log_prob, z, mean, log_std
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.sample()
action = torch.tanh(z)
action = action.detach().cpu().numpy()
return action[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'hidden_dim': 4}]
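# Hedged usage sketch (added for illustration; not part of the original repo):
# evaluate() returns a tanh-squashed action and its corrected log-probability.
def _example_policy_usage():
    policy = PolicyNet(state_dim=4, action_dim=4, hidden_dim=4)
    state = torch.rand(4, 4)
    mean, log_std = policy(state)                    # both (4, 4)
    action, log_prob, _z, _mean, _log_std = policy.evaluate(state)
    return action.shape, log_prob.shape              # (4, 4) and (4, 1)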
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -20.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 2.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 >= tmp3
tmp8 = tmp2 <= tmp5
tmp9 = tmp7 & tmp8
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf5,
primals_9, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del primals_9
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0
), buf7, primals_8, primals_6, buf8, primals_4, buf9
class PolicyNetNew(nn.Module):
def __init__(self, state_dim, action_dim, hidden_dim, init_w=0.003,
log_std_min=-20, log_std_max=2):
super(PolicyNetNew, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(state_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, action_dim)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_dim, action_dim)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def evaluate(self, state, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.sample()
action = torch.tanh(z)
log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
log_prob = log_prob.sum(-1, keepdim=True)
return action, log_prob, z, mean, log_std
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.sample()
action = torch.tanh(z)
action = action.detach().cpu().numpy()
return action[0]
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.mean_linear.weight
primals_7 = self.mean_linear.bias
primals_8 = self.log_std_linear.weight
primals_9 = self.log_std_linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
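# Hypothetical usage sketch for the compiled policy (assumptions: a CUDA device is
# available, since call() launches Triton kernels, and the input matches the
# (4, 4, 4, 4) shape asserted in call(); _example_policy_net is illustrative only).
def _example_policy_net():
    net = PolicyNetNew(state_dim=4, action_dim=4, hidden_dim=4).cuda()
    state = torch.rand([4, 4, 4, 4], device='cuda')
    mean, log_std = net(state)  # log_std is clamped to [log_std_min, log_std_max]
    return mean.shape, log_std.shape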
| JohnJim0816/rl-tutorials | PolicyNet | false | 8,370 | ["MIT"] | 16 | e99daea815da85f9f25dff2d01b030249a203d22 | https://github.com/JohnJim0816/rl-tutorials/tree/e99daea815da85f9f25dff2d01b030249a203d22 |
SE
|
import torch
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
class SwishEfficient(torch.autograd.Function):
"""Swish activation function: x * sigmoid(x)."""
@staticmethod
def forward(ctx, x):
result = x * torch.sigmoid(x)
ctx.save_for_backward(x)
return result
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
sigmoid_x = torch.sigmoid(x)
return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)."""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return SwishEfficient.apply(x)
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""
def _round_width(self, width, multiplier, min_width=8, divisor=8):
"""
Round width of filters based on width multiplier
Args:
width (int): the channel dimensions of the input.
multiplier (float): the multiplication factor.
min_width (int): the minimum width after multiplication.
            divisor (int): the new width should be divisible by divisor.
"""
if not multiplier:
return width
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor *
divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def __init__(self, dim_in, ratio, relu_act=True):
"""
Args:
dim_in (int): the channel dimensions of the input.
ratio (float): the channel reduction ratio for squeeze.
relu_act (bool): whether to use ReLU activation instead
of Swish (default).
            divisor (int): the new width should be divisible by divisor.
"""
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
dim_fc = self._round_width(dim_in, ratio)
self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
self.fc1_act = nn.ReLU() if relu_act else Swish()
self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
self.fc2_sig = nn.Sigmoid()
def forward(self, x):
x_in = x
for module in self.children():
x = module(x)
return x_in * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'ratio': 4}]
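# Hypothetical usage sketch (assumptions: a batched 5-D (N, C, D, H, W) input rather than
# the 4-D tensor from get_inputs(); _example_se is illustrative only). The block rescales
# channels, so the output keeps the input shape.
def _example_se():
    se = SE(dim_in=4, ratio=4)
    x = torch.rand([2, 4, 4, 4, 4])
    out = se(x)
    assert out.shape == x.shape  # per-channel gating preserves the spatial layout
    return out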
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 1, 1, 1), (16, 1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1,
1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (16, 1, 1, 1), (1, 16, 16, 16), 0)
del buf2
buf7 = empty_strided_cuda((16, 1, 1, 1), (1, 1, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3,
primals_3, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 16,
1, 1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(4)](buf5, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), reinterpret_tensor(buf3, (1,
16, 1, 1, 1), (16, 1, 1, 1, 1), 0), buf5, buf7
class SwishEfficient(torch.autograd.Function):
"""Swish activation function: x * sigmoid(x)."""
@staticmethod
def forward(ctx, x):
result = x * torch.sigmoid(x)
ctx.save_for_backward(x)
return result
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
sigmoid_x = torch.sigmoid(x)
return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)."""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return SwishEfficient.apply(x)
class SENew(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""
def _round_width(self, width, multiplier, min_width=8, divisor=8):
"""
Round width of filters based on width multiplier
Args:
width (int): the channel dimensions of the input.
multiplier (float): the multiplication factor.
min_width (int): the minimum width after multiplication.
            divisor (int): the new width should be divisible by divisor.
"""
if not multiplier:
return width
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor *
divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def __init__(self, dim_in, ratio, relu_act=True):
"""
Args:
dim_in (int): the channel dimensions of the input.
ratio (float): the channel reduction ratio for squeeze.
relu_act (bool): whether to use ReLU activation instead
of Swish (default).
            divisor (int): the new width should be divisible by divisor.
"""
super(SENew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
dim_fc = self._round_width(dim_in, ratio)
self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
self.fc1_act = nn.ReLU() if relu_act else Swish()
self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
self.fc2_sig = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| JaywongWang/SlowFast | SE | false | 8,371 | ["Apache-2.0"] | 43 | 366467aafc856712fdc3e9c4cce8e90969047ee6 | https://github.com/JaywongWang/SlowFast/tree/366467aafc856712fdc3e9c4cce8e90969047ee6 |
WasLoss
|
import torch
import torch.nn as nn
class WasLoss(nn.Module):
def __init__(self):
super(WasLoss, self).__init__()
self.MSEls = torch.nn.BCEWithLogitsLoss()
def forward(self, true_data, fake_data):
SLX, _ = torch.sort(true_data, 0)
SLG, _ = torch.sort(fake_data, 0)
return self.MSEls(SLG - SLX, torch.ones_like(SLX))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
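# Hypothetical usage sketch (assumption: plain eager CPU execution; _example_was_loss is
# illustrative only). Both inputs are sorted along dim 0 before the BCE-with-logits
# comparison against a tensor of ones.
def _example_was_loss():
    criterion = WasLoss()
    true_data = torch.rand([4, 4, 4, 4])
    fake_data = torch.rand([4, 4, 4, 4])
    loss = criterion(true_data, fake_data)  # scalar loss tensor
    return loss.item()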
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * r1), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, _tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1,
stable=False, descending=False)
tl.store(out_ptr0 + (x0 + 64 * r1), tmp5, xmask)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_sub_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = tmp3 * tmp2
tmp5 = triton_helpers.minimum(tmp3, tmp2)
tmp6 = tl_math.abs(tmp2)
tmp7 = -tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = libdevice.log1p(tmp8)
tmp10 = tmp5 - tmp9
tmp11 = tmp4 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_sort_0[grid(64)](arg0_1, buf0, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_sort_0[grid(64)](arg1_1, buf2, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_binary_cross_entropy_with_logits_sub_1[grid(1)](buf5,
buf2, buf0, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf5,
class WasLossNew(nn.Module):
def __init__(self):
super(WasLossNew, self).__init__()
self.MSEls = torch.nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Johnson-yue/RS-GAN | WasLoss | false | 8,372 | ["MIT"] | 26 | 8e8723045d63d8f9a4b510800cd909e7a6e3d195 | https://github.com/Johnson-yue/RS-GAN/tree/8e8723045d63d8f9a4b510800cd909e7a6e3d195 |
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, n_obs, action_dim, hidden_size, init_w=0.003):
super(Actor, self).__init__()
self.linear1 = nn.Linear(n_obs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, action_dim)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = torch.tanh(self.linear3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_obs': 4, 'action_dim': 4, 'hidden_size': 4}]
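# Hypothetical usage sketch (assumption: a flat (batch, n_obs) state also works, since
# nn.Linear only constrains the last dimension; _example_actor is illustrative only).
def _example_actor():
    actor = Actor(n_obs=4, action_dim=2, hidden_size=8)
    state = torch.rand([1, 4])
    action = actor(state)  # tanh squashes each action component into [-1, 1]
    return action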
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, n_obs, action_dim, hidden_size, init_w=0.003):
super(ActorNew, self).__init__()
self.linear1 = nn.Linear(n_obs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, action_dim)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| JohnJim0816/rl-tutorials | Actor | false | 8,373 | ["MIT"] | 16 | e99daea815da85f9f25dff2d01b030249a203d22 | https://github.com/JohnJim0816/rl-tutorials/tree/e99daea815da85f9f25dff2d01b030249a203d22 |
Mish
|
import torch
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt
from itertools import product as product
class Mish(nn.Module):
def forward(self, x):
return x.mul_(F.softplus(x).tanh())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
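# Hypothetical usage sketch (assumption: the caller tolerates in-place mutation of the
# input, since forward() uses mul_; _example_mish is illustrative only).
def _example_mish():
    act = Mish()
    x = torch.linspace(-3.0, 3.0, steps=7)
    y = act(x.clone())  # clone so the original values stay untouched
    return y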
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return arg0_1,
class MishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Het-Shah/Monk_Object_Detection | Mish | false | 8,374 | ["Apache-2.0"] | 15 | 1d7a07193ea3455221caa41d07c33c81d50c6b3f | https://github.com/Het-Shah/Monk_Object_Detection/tree/1d7a07193ea3455221caa41d07c33c81d50c6b3f |
AttentionPool2d
|
import math
import torch
from torch import nn
import torch as th
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class QKVAttention(nn.Module):
"""
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv, encoder_kv=None):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch,
dim=1)
if encoder_kv is not None:
assert encoder_kv.shape[1] == self.n_heads * ch * 2
ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch
, dim=1)
k = th.cat([ek, k], dim=-1)
v = th.cat([ev, v], dim=-1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum('bct,bcs->bts', q * scale, k * scale)
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v)
return a.reshape(bs, -1, length)
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(self, spacial_dim: 'int', embed_dim: 'int',
num_heads_channels: 'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim,
spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)
x = x + self.positional_embedding[None, :, :]
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads_channels': 4}]
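# Hypothetical usage sketch (assumption: H * W must equal spacial_dim ** 2, because the
# positional embedding holds spacial_dim ** 2 + 1 positions; _example_attention_pool is
# illustrative only).
def _example_attention_pool():
    pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads_channels=4)
    x = torch.rand([4, 4, 4, 4])  # (B, C, H, W)
    pooled = pool(x)
    assert pooled.shape == (4, 4)  # one pooled embedding per batch element
    return pooled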
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 17
x3 = xindex // 17
x4 = xindex % 68
x5 = xindex
tmp15 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 17, tl.int64)
tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x5, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 68
x3 = xindex % 68
x1 = xindex // 17 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 204 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 68
x3 = xindex % 68
x1 = xindex // 17 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (68 + x3 + 204 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 68
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 17 * x0), tmp11, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 17 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 68 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 17 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 17 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 17), (17, 1))
assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2,
buf1, 272, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 12, 17), (204, 17, 1))
buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_mul_2[grid(272)](buf2, primals_4, buf3, 272,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_mul_3[grid(272)](buf2, primals_4, buf4, 272,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17),
0), buf4, out=buf5)
buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
triton_per_fused__softmax_4[grid(68)](buf5, buf8, 68, 17, XBLOCK=1,
num_warps=2, num_stages=1)
del buf5
buf9 = buf2
del buf2
triton_poi_fused_convolution_5[grid(816)](buf9, primals_4, 816,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204,
1, 17), 136), out=buf10)
buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_convolution_6[grid(16, 17)](buf10, buf11, 16, 17,
XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 17), (68, 17, 1))
del buf11
buf13 = buf12
del buf12
triton_poi_fused_convolution_7[grid(272)](buf13, primals_6, 272,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
return reinterpret_tensor(buf13, (4, 4), (68, 17), 0
), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4,
4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204,
17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0
)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class QKVAttention(nn.Module):
"""
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv, encoder_kv=None):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch,
dim=1)
if encoder_kv is not None:
assert encoder_kv.shape[1] == self.n_heads * ch * 2
ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch
, dim=1)
k = th.cat([ek, k], dim=-1)
v = th.cat([ev, v], dim=-1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum('bct,bcs->bts', q * scale, k * scale)
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v)
return a.reshape(bs, -1, length)
class AttentionPool2dNew(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(self, spacial_dim: 'int', embed_dim: 'int',
num_heads_channels: 'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim,
spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, input_0):
primals_2 = self.positional_embedding
primals_3 = self.qkv_proj.weight
primals_4 = self.qkv_proj.bias
primals_5 = self.c_proj.weight
primals_6 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Jack000/glid-3 | AttentionPool2d | false | 8,375 | ["MIT"] | 31 | 4a18efc2785339ebc743e149a7955e34fff436fb | https://github.com/Jack000/glid-3/tree/4a18efc2785339ebc743e149a7955e34fff436fb |
GaussianKernel
|
import math
import torch
import torch.nn as nn
import torch.utils.data
class GaussianKernel(nn.Module):
def __init__(self, delta_var, pmaps_threshold):
super().__init__()
self.delta_var = delta_var
self.two_sigma = delta_var * delta_var / -math.log(pmaps_threshold)
def forward(self, dist_map):
return torch.exp(-dist_map * dist_map / self.two_sigma)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'delta_var': 4, 'pmaps_threshold': 4}]
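# Hypothetical usage sketch (assumption: pmaps_threshold lies in (0, 1) so that two_sigma
# is positive, unlike the placeholder value 4 in get_init_inputs();
# _example_gaussian_kernel is illustrative only). By construction, a distance of exactly
# delta_var maps to pmaps_threshold and a distance of 0 maps to 1.
def _example_gaussian_kernel():
    kernel = GaussianKernel(delta_var=0.5, pmaps_threshold=0.9)
    dist_map = torch.rand([4, 4, 4, 4])
    pmaps = kernel(dist_map)  # values in (0, 1]
    return pmaps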
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = tmp1 * tmp0
tmp3 = -0.08664339756999316
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GaussianKernelNew(nn.Module):
def __init__(self, delta_var, pmaps_threshold):
super().__init__()
self.delta_var = delta_var
self.two_sigma = delta_var * delta_var / -math.log(pmaps_threshold)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| JonasHell/torch-em | GaussianKernel | false | 8,376 | ["MIT"] | 13 | 2e008e0cd2f0ea6681581374fce4f9f47b986d55 | https://github.com/JonasHell/torch-em/tree/2e008e0cd2f0ea6681581374fce4f9f47b986d55 |
Attention
|
import torch
from torch import nn
import torch.nn.functional as F
class Attention(nn.Module):
"""
Applies an attention mechanism on the output features from the decoder.
"""
def __init__(self, dim):
super(Attention, self).__init__()
self.dim = dim
self.linear1 = nn.Linear(dim * 2, dim)
self.linear2 = nn.Linear(dim, 1, bias=False)
def forward(self, hidden_state, encoder_outputs):
"""
Arguments:
hidden_state {Variable} -- batch_size x dim
encoder_outputs {Variable} -- batch_size x seq_len x dim
Returns:
Variable -- context vector of size batch_size x dim
"""
batch_size, seq_len, _ = encoder_outputs.size()
hidden_state = hidden_state.unsqueeze(1).repeat(1, seq_len, 1)
inputs = torch.cat((encoder_outputs, hidden_state), 2).view(-1,
self.dim * 2)
o = self.linear2(torch.tanh(self.linear1(inputs)))
e = o.view(batch_size, seq_len)
alpha = F.softmax(e, dim=1)
context = torch.bmm(alpha.unsqueeze(1), encoder_outputs).squeeze(1)
return context
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
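# Hypothetical usage sketch (assumption: hidden_state is (batch, dim) and encoder_outputs
# is (batch, seq_len, dim), matching get_inputs(); _example_attention is illustrative only).
def _example_attention():
    attn = Attention(dim=4)
    hidden_state = torch.rand([4, 4])
    encoder_outputs = torch.rand([4, 4, 4])
    context = attn(hidden_state, encoder_outputs)
    assert context.shape == (4, 4)  # one context vector per batch element
    return context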
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = xindex // 8
x2 = xindex // 32
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0
), primals_1, out=buf6)
del buf5
return reinterpret_tensor(buf6, (4, 4), (4, 1), 0), reinterpret_tensor(buf0
, (16, 8), (8, 1), 0), buf2, buf3, reinterpret_tensor(primals_1, (4,
4, 4), (16, 1, 4), 0), primals_5
class AttentionNew(nn.Module):
"""
Applies an attention mechanism on the output features from the decoder.
"""
def __init__(self, dim):
super(AttentionNew, self).__init__()
self.dim = dim
self.linear1 = nn.Linear(dim * 2, dim)
self.linear2 = nn.Linear(dim, 1, bias=False)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_5 = self.linear2.weight
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| JiwanChung/tapm | Attention | false | 8,377 | ["MIT"] | 14 | ec42b139d1c012daccc55f85e67744488d526476 | https://github.com/JiwanChung/tapm/tree/ec42b139d1c012daccc55f85e67744488d526476 |
Net
|
import torch
import torch.nn as nn
class Swish(nn.Module):
def __init__(self, inplace=True):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
if self.inplace:
x.mul_(torch.sigmoid(x))
return x
else:
return x * torch.sigmoid(x)
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden):
super(Net, self).__init__()
self.features = nn.Sequential()
self.features.add_module('hidden', torch.nn.Linear(n_feature, n_hidden)
)
self.features.add_module('active1', Swish())
self.features.add_module('hidden2', torch.nn.Linear(n_hidden, n_hidden)
)
self.features.add_module('active2', Swish())
self.features.add_module('hidden3', torch.nn.Linear(n_hidden, n_hidden)
)
self.features.add_module('active3', Swish())
self.features.add_module('predict', torch.nn.Linear(n_hidden, 3))
def forward(self, x):
return self.features(x)
def reset_parameters(self, verbose=False):
for module in self.modules():
if isinstance(module, self.__class__):
continue
if 'reset_parameters' in dir(module):
if callable(module.reset_parameters):
module.reset_parameters()
if verbose:
None
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_feature': 4, 'n_hidden': 4}]
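# Hypothetical usage sketch (assumptions: a flat (batch, n_feature) input, and no_grad()
# because Swish(inplace=True) mutates activations in place; _example_net is illustrative
# only). The predict layer always emits 3 values per sample.
def _example_net():
    net = Net(n_feature=4, n_hidden=8)
    x = torch.rand([16, 4])
    with torch.no_grad():
        y = net(x)
    assert y.shape == (16, 3)
    return y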
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (3, 4), (4, 1))
assert_size_stride(primals_9, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf3, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0)
del buf4
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_7
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf6, buf7, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
(4, 3), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_9
return reinterpret_tensor(buf9, (4, 4, 4, 3), (48, 12, 3, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, buf2, buf3, buf5, buf6, buf8, primals_8, primals_6, primals_4
class Swish(nn.Module):
def __init__(self, inplace=True):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
if self.inplace:
x.mul_(torch.sigmoid(x))
return x
else:
return x * torch.sigmoid(x)
class NetNew(torch.nn.Module):
def __init__(self, n_feature, n_hidden):
super(NetNew, self).__init__()
self.features = nn.Sequential()
self.features.add_module('hidden', torch.nn.Linear(n_feature, n_hidden)
)
self.features.add_module('active1', Swish())
self.features.add_module('hidden2', torch.nn.Linear(n_hidden, n_hidden)
)
self.features.add_module('active2', Swish())
self.features.add_module('hidden3', torch.nn.Linear(n_hidden, n_hidden)
)
self.features.add_module('active3', Swish())
self.features.add_module('predict', torch.nn.Linear(n_hidden, 3))
def reset_parameters(self, verbose=False):
for module in self.modules():
if isinstance(module, self.__class__):
continue
if 'reset_parameters' in dir(module):
if callable(module.reset_parameters):
module.reset_parameters()
if verbose:
None
def forward(self, input_0):
primals_1 = self.features.hidden.weight
primals_2 = self.features.hidden.bias
primals_4 = self.features.hidden2.weight
primals_5 = self.features.hidden2.bias
primals_6 = self.features.hidden3.weight
primals_7 = self.features.hidden3.bias
primals_8 = self.features.predict.weight
primals_9 = self.features.predict.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| Jianxun-Wang/Physics-constrained-Bayesian-deep-learning | Net | false | 8,378 | ["MIT"] | 24 | cde0287f848f83c6def1fe409c67d7d4e14174da | https://github.com/Jianxun-Wang/Physics-constrained-Bayesian-deep-learning/tree/cde0287f848f83c6def1fe409c67d7d4e14174da |
Block
|
import torch
from torch import nn
import torch.nn.functional as F
class Block(nn.Module):
def __init__(self, dim):
super(Block, self).__init__()
self.dim = dim
self.layer_norm = nn.LayerNorm(self.dim)
self.conv = nn.Conv1d(self.dim, self.dim, kernel_size=3, padding=1)
def forward(self, x):
x_orig = x
x = F.relu(x)
x = self.layer_norm(x)
x = x.transpose(1, 2)
x = self.conv(x)
x = x.transpose(1, 2)
return x + x_orig
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
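# Hypothetical usage sketch (assumption: input is (batch, seq_len, dim) so LayerNorm acts
# on the channel dimension and Conv1d sees (batch, dim, seq_len) after the transpose;
# _example_block is illustrative only).
def _example_block():
    block = Block(dim=4)
    x = torch.rand([2, 5, 4])
    y = block(x)  # the residual connection keeps the input shape
    assert y.shape == x.shape
    return y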
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_relu_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_relu_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(64)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_2[grid(16, 4)](buf2, buf3, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
del buf3
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0)
del buf4
triton_poi_fused_add_3[grid(16, 4)](buf5, primals_5, primals_1, 16,
4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
return buf5, primals_1, primals_4, reinterpret_tensor(buf2, (4, 4, 4),
(16, 1, 4), 0)
class BlockNew(nn.Module):
def __init__(self, dim):
super(BlockNew, self).__init__()
self.dim = dim
self.layer_norm = nn.LayerNorm(self.dim)
self.conv = nn.Conv1d(self.dim, self.dim, kernel_size=3, padding=1)
def forward(self, input_0):
primals_2 = self.layer_norm.weight
primals_3 = self.layer_norm.bias
primals_4 = self.conv.weight
primals_5 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| JiwanChung/tapm | Block | false | 8,379 | ["MIT"] | 14 | ec42b139d1c012daccc55f85e67744488d526476 | https://github.com/JiwanChung/tapm/tree/ec42b139d1c012daccc55f85e67744488d526476 |
RLFeatPreprocessNet
|
import torch
import torch.nn as nn
import torch.utils.data
class RLFeatPreprocessNet(nn.Module):
"""
Preprocess Features
1. visual feature
2. label prediction embed feature
3. box embed
4. overlap embed
"""
def __init__(self, feat_size, embed_size, bbox_size, overlap_size,
output_size):
super(RLFeatPreprocessNet, self).__init__()
self.feature_size = feat_size
self.embed_size = embed_size
self.box_info_size = bbox_size
self.overlap_info_size = overlap_size
self.output_size = output_size
self.resize_feat = nn.Linear(self.feature_size, int(output_size / 4))
self.resize_embed = nn.Linear(self.embed_size, int(output_size / 4))
self.resize_box = nn.Linear(self.box_info_size, int(output_size / 4))
self.resize_overlap = nn.Linear(self.overlap_info_size, int(
output_size / 4))
self.resize_feat.weight.data.normal_(0, 0.001)
self.resize_embed.weight.data.normal_(0, 0.01)
self.resize_box.weight.data.normal_(0, 1)
self.resize_overlap.weight.data.normal_(0, 1)
self.resize_feat.bias.data.zero_()
self.resize_embed.bias.data.zero_()
self.resize_box.bias.data.zero_()
self.resize_overlap.bias.data.zero_()
def forward(self, obj_feat, obj_embed, box_info, overlap_info):
resized_obj = self.resize_feat(obj_feat)
resized_embed = self.resize_embed(obj_embed)
resized_box = self.resize_box(box_info)
resized_overlap = self.resize_overlap(overlap_info)
output_feat = torch.cat((resized_obj, resized_embed, resized_box,
resized_overlap), 1)
return output_feat
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feat_size': 4, 'embed_size': 4, 'bbox_size': 4,
'overlap_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x0 = xindex % 4
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask,
other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1) + 16 * x2), tmp16 &
xmask, other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (1, 4), (4, 1))
assert_size_stride(primals_11, (1,), (1,))
assert_size_stride(primals_12, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (64,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf5)
del primals_7
del primals_8
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(primals_12, (64,
4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 1), (1, 4),
0), alpha=1, beta=1, out=buf7)
del primals_10
del primals_11
buf8 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](buf1, buf3, buf5, buf7, buf8, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf3
del buf5
del buf7
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_12, (64, 4), (4, 1), 0)
class RLFeatPreprocessNetNew(nn.Module):
"""
Preprocess Features
1. visual feature
2. label prediction embed feature
3. box embed
4. overlap embed
"""
def __init__(self, feat_size, embed_size, bbox_size, overlap_size,
output_size):
super(RLFeatPreprocessNetNew, self).__init__()
self.feature_size = feat_size
self.embed_size = embed_size
self.box_info_size = bbox_size
self.overlap_info_size = overlap_size
self.output_size = output_size
self.resize_feat = nn.Linear(self.feature_size, int(output_size / 4))
self.resize_embed = nn.Linear(self.embed_size, int(output_size / 4))
self.resize_box = nn.Linear(self.box_info_size, int(output_size / 4))
self.resize_overlap = nn.Linear(self.overlap_info_size, int(
output_size / 4))
self.resize_feat.weight.data.normal_(0, 0.001)
self.resize_embed.weight.data.normal_(0, 0.01)
self.resize_box.weight.data.normal_(0, 1)
self.resize_overlap.weight.data.normal_(0, 1)
self.resize_feat.bias.data.zero_()
self.resize_embed.bias.data.zero_()
self.resize_box.bias.data.zero_()
self.resize_overlap.bias.data.zero_()
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.resize_feat.weight
primals_2 = self.resize_feat.bias
primals_4 = self.resize_embed.weight
primals_5 = self.resize_embed.bias
primals_7 = self.resize_box.weight
primals_8 = self.resize_box.bias
primals_10 = self.resize_overlap.weight
primals_11 = self.resize_overlap.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_12 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
KaihuaTang/VCTree-Visual-Question-Answering
|
RLFeatPreprocessNet
| false
| 8,380
|
[
"MIT"
] | 31
|
b6b0a8bdb01d45d36de3bded91db42544ad6a593
|
https://github.com/KaihuaTang/VCTree-Visual-Question-Answering/tree/b6b0a8bdb01d45d36de3bded91db42544ad6a593
|
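A possible parity check for the row above (a sketch, assuming CUDA and that both class definitions are in scope). Because __init__ re-randomises the weights, the reference state_dict is copied into the fused module before comparing:
import torch
ref = RLFeatPreprocessNet(4, 4, 4, 4, 4).cuda()
fused = RLFeatPreprocessNetNew(4, 4, 4, 4, 4).cuda()
fused.load_state_dict(ref.state_dict())  # align the randomly initialised weights
inputs = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(4)]
print(torch.allclose(ref(*inputs), fused(*inputs), atol=1e-6))  # expected True if the fused cat matches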
ELUPlus
|
import torch
from torch import nn
import torch.nn
class ELUPlus(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, x):
return self.elu(x) + 1.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp8 = tmp7 + tmp3
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ELUPlusNew(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
KailinLi/nflows
|
ELUPlus
| false
| 8,381
|
[
"MIT"
] | 13
|
7c07a1d5e510beb681d1b11d6ffda95a086a8153
|
https://github.com/KailinLi/nflows/tree/7c07a1d5e510beb681d1b11d6ffda95a086a8153
|
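A quick elementwise check for ELUPlusNew (a sketch, assuming CUDA): the fused kernel computes where(x > 0, x, expm1(x)) + 1, which is exactly ELU(x) + 1.
import torch
# Hypothetical check against the eager ELU.
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(ELUPlusNew()(x), torch.nn.functional.elu(x) + 1.0))  # expected True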
Memory
|
import torch
import torch.nn as nn
import torch.nn.parallel
class Memory(nn.Module):
def __init__(self):
super(Memory, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context_key, content_value):
"""
        input: batch x idf x ih x iw (queryL = ih * iw)
        context_key, content_value: batch x idf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context_key.size(0), context_key.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context_key
weight = torch.bmm(targetT, sourceT)
weight = weight.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
weight.data.masked_fill_(mask.data, -float('inf'))
weight = torch.nn.functional.softmax(weight, dim=1)
weight = weight.view(batch_size, queryL, sourceL)
weight = torch.transpose(weight, 1, 2).contiguous()
weightedContext = torch.bmm(content_value, weight)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
weight = weight.view(batch_size, -1, ih, iw)
return weightedContext, weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4,
4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf0, arg1_1, out=buf1)
del arg1_1
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(16, 16)](buf2, buf3, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0)
del buf2
extern_kernels.bmm(arg2_1, buf3, out=buf4)
del arg2_1
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
class MemoryNew(nn.Module):
def __init__(self):
super(MemoryNew, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
JoonHong-Kim/T2I_CL
|
Memory
| false
| 8,382
|
[
"MIT"
] | 35
|
c52aa73da903d6e4174eeef2663e5bc1163785b1
|
https://github.com/JoonHong-Kim/T2I_CL/tree/c52aa73da903d6e4174eeef2663e5bc1163785b1
|
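A shape and normalisation check for the fused attention above (a sketch, assuming CUDA): the returned weight is the softmax attention reshaped to (batch, sourceL, ih, iw), so it should sum to one over dim=1.
import torch
mem = MemoryNew()
inp = torch.rand(4, 4, 4, 4, device='cuda')
key = torch.rand(4, 4, 4, device='cuda')
value = torch.rand(4, 4, 4, device='cuda')
ctx, weight = mem(inp, key, value)
print(ctx.shape, weight.shape)  # both torch.Size([4, 4, 4, 4])
print(torch.allclose(weight.sum(dim=1), torch.ones(4, 4, 4, device='cuda')))  # expected True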
DiceLossWithLogits
|
import torch
import torch.nn as nn
import torch.utils.data
def flatten_samples(input_):
"""
Flattens a tensor or a variable such that the channel axis is first and the sample axis
is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
    The input must be at least 2d.
"""
num_channels = input_.size(1)
permute_axes = list(range(input_.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
permuted = input_.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, -1)
return flattened
def dice_score(input_, target, invert=False, channelwise=True, eps=1e-07):
if channelwise:
input_ = flatten_samples(input_)
target = flatten_samples(target)
numerator = (input_ * target).sum(-1)
denominator = (input_ * input_).sum(-1) + (target * target).sum(-1)
channelwise_score = 2 * (numerator / denominator.clamp(min=eps))
if invert:
channelwise_score = 1.0 - channelwise_score
score = channelwise_score.sum()
else:
numerator = (input_ * target).sum()
denominator = (input_ * input_).sum() + (target * target).sum()
score = 2.0 * (numerator / denominator.clamp(min=eps))
if invert:
score = 1.0 - score
return score
class DiceLossWithLogits(nn.Module):
def __init__(self, channelwise=True, eps=1e-07):
super().__init__()
self.channelwise = channelwise
self.eps = eps
self.init_kwargs = {'channelwise': channelwise, 'eps': self.eps}
def forward(self, input_, target):
return dice_score(nn.functional.sigmoid(input_), target, invert=
True, channelwise=self.channelwise, eps=self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp2 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp2 * tmp2
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
tl.store(out_ptr2 + x0, tmp17, xmask)
@triton.jit
def triton_per_fused_add_clamp_div_mul_rsub_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.load(in_ptr2 + r0, None)
tmp3 = tmp1 + tmp2
tmp4 = 1e-07
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp0 / tmp5
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp8
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_div_mul_rsub_sum_1[grid(1)](buf0, buf1,
buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf3,
def flatten_samples(input_):
"""
Flattens a tensor or a variable such that the channel axis is first and the sample axis
is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
    The input must be at least 2d.
"""
num_channels = input_.size(1)
permute_axes = list(range(input_.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
permuted = input_.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, -1)
return flattened
def dice_score(input_, target, invert=False, channelwise=True, eps=1e-07):
if channelwise:
input_ = flatten_samples(input_)
target = flatten_samples(target)
numerator = (input_ * target).sum(-1)
denominator = (input_ * input_).sum(-1) + (target * target).sum(-1)
channelwise_score = 2 * (numerator / denominator.clamp(min=eps))
if invert:
channelwise_score = 1.0 - channelwise_score
score = channelwise_score.sum()
else:
numerator = (input_ * target).sum()
denominator = (input_ * input_).sum() + (target * target).sum()
score = 2.0 * (numerator / denominator.clamp(min=eps))
if invert:
score = 1.0 - score
return score
class DiceLossWithLogitsNew(nn.Module):
def __init__(self, channelwise=True, eps=1e-07):
super().__init__()
self.channelwise = channelwise
self.eps = eps
self.init_kwargs = {'channelwise': channelwise, 'eps': self.eps}
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
JonasHell/torch-em
|
DiceLossWithLogits
| false
| 8,383
|
[
"MIT"
] | 13
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
https://github.com/JonasHell/torch-em/tree/2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
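A possible parity check between the eager and fused Dice losses (a sketch, assuming CUDA and that both class definitions above are in scope):
import torch
pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
ref = DiceLossWithLogits()(pred, target)
fused = DiceLossWithLogitsNew()(pred, target)
print(torch.allclose(ref, fused, atol=1e-5))  # expected True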
ActorNet
|
import torch
import torch.nn as nn
class ActorNet(nn.Module):
""" Actor Network """
def __init__(self, state_num, action_num, hidden1=256, hidden2=256,
hidden3=256):
"""
:param state_num: number of states
:param action_num: number of actions
:param hidden1: hidden layer 1 dimension
:param hidden2: hidden layer 2 dimension
:param hidden3: hidden layer 3 dimension
"""
super(ActorNet, self).__init__()
self.fc1 = nn.Linear(state_num, hidden1)
self.fc2 = nn.Linear(hidden1, hidden2)
self.fc3 = nn.Linear(hidden2, hidden3)
self.fc4 = nn.Linear(hidden3, action_num)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.relu(self.fc3(x))
out = self.sigmoid(self.fc4(x))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_num': 4, 'action_num': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256), (256, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf10, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf9, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 256), (1, 256), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf4
buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf5,
primals_7, buf8, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_sigmoid_1[grid(256)](buf7, primals_9, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), reinterpret_tensor(buf5, (64, 256), (256, 1), 0
), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10
class ActorNetNew(nn.Module):
""" Actor Network """
def __init__(self, state_num, action_num, hidden1=256, hidden2=256,
hidden3=256):
"""
:param state_num: number of states
:param action_num: number of actions
:param hidden1: hidden layer 1 dimension
:param hidden2: hidden layer 2 dimension
:param hidden3: hidden layer 3 dimension
"""
super(ActorNetNew, self).__init__()
self.fc1 = nn.Linear(state_num, hidden1)
self.fc2 = nn.Linear(hidden1, hidden2)
self.fc3 = nn.Linear(hidden2, hidden3)
self.fc4 = nn.Linear(hidden3, action_num)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
Kanaderu/spiking-ddpg-mapless-navigation
|
ActorNet
| false
| 8,384
|
[
"MIT"
] | 29
|
2b5e7e67385dee4428b8036bc4ffe95e812b34e0
|
https://github.com/Kanaderu/spiking-ddpg-mapless-navigation/tree/2b5e7e67385dee4428b8036bc4ffe95e812b34e0
|
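A minimal smoke test for the fused actor network (a sketch, assuming CUDA): the final sigmoid keeps every action value strictly inside (0, 1).
import torch
actor = ActorNetNew(state_num=4, action_num=4).cuda()
out = actor(torch.rand(4, 4, 4, 4, device='cuda'))
print(out.shape)  # torch.Size([4, 4, 4, 4])
print(bool(((out > 0) & (out < 1)).all()))  # expected True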
StochasticClassifier
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class StochasticClassifier(nn.Module):
def __init__(self, num_features, num_classes, temp=0.05):
super().__init__()
self.mu = nn.Parameter(0.01 * torch.randn(num_classes, num_features))
self.sigma = nn.Parameter(torch.zeros(num_classes, num_features))
self.temp = temp
def forward(self, x, stochastic=True):
mu = self.mu
sigma = self.sigma
if stochastic:
sigma = F.softplus(sigma - 4)
weight = sigma * torch.randn_like(mu) + mu
else:
weight = mu
weight = F.normalize(weight, p=2, dim=1)
x = F.normalize(x, p=2, dim=1)
score = F.linear(x, weight)
score = score / self.temp
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4, 'num_classes': 4}]
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_linalg_vector_norm_mul_softplus_sub_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp33 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp43 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp45 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp11 * tmp11
tmp14 = tmp13 - tmp1
tmp15 = tmp14 > tmp3
tmp16 = tl_math.exp(tmp14)
tmp17 = libdevice.log1p(tmp16)
tmp18 = tl.where(tmp15, tmp14, tmp17)
tmp20 = tmp18 * tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp12 + tmp23
tmp26 = tmp25 - tmp1
tmp27 = tmp26 > tmp3
tmp28 = tl_math.exp(tmp26)
tmp29 = libdevice.log1p(tmp28)
tmp30 = tl.where(tmp27, tmp26, tmp29)
tmp32 = tmp30 * tmp31
tmp34 = tmp32 + tmp33
tmp35 = tmp34 * tmp34
tmp36 = tmp24 + tmp35
tmp38 = tmp37 - tmp1
tmp39 = tmp38 > tmp3
tmp40 = tl_math.exp(tmp38)
tmp41 = libdevice.log1p(tmp40)
tmp42 = tl.where(tmp39, tmp38, tmp41)
tmp44 = tmp42 * tmp43
tmp46 = tmp44 + tmp45
tmp47 = tmp46 * tmp46
tmp48 = tmp36 + tmp47
tl.store(out_ptr0 + x0, tmp48, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_softplus_sub_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x2, xmask)
tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp9 + tmp10
tmp13 = libdevice.sqrt(tmp12)
tmp14 = 1e-12
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp11 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_linalg_vector_norm_mul_softplus_sub_0[grid(4)](
primals_2, buf1, primals_1, buf2, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_1[grid(256)](primals_3, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_mul_softplus_sub_2[grid(16)](primals_2,
buf1, primals_1, buf2, buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf2
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf4, (4, 4), (1, 4), 0), out=buf5)
del buf4
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_div_3[grid(256)](buf6, 256, XBLOCK=256, num_warps=
4, num_stages=1)
return buf6, primals_1, primals_2, buf1, reinterpret_tensor(buf3, (64,
4), (4, 1), 0)
class StochasticClassifierNew(nn.Module):
def __init__(self, num_features, num_classes, temp=0.05):
super().__init__()
self.mu = nn.Parameter(0.01 * torch.randn(num_classes, num_features))
self.sigma = nn.Parameter(torch.zeros(num_classes, num_features))
self.temp = temp
def forward(self, input_0):
primals_1 = self.mu
primals_2 = self.sigma
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KaiyangZhou/ssdg-benchmark
|
StochasticClassifier
| false
| 8,385
|
[
"MIT"
] | 43
|
aaa48be4f93b77347fbadff649be6b3e0f7a8779
|
https://github.com/KaiyangZhou/ssdg-benchmark/tree/aaa48be4f93b77347fbadff649be6b3e0f7a8779
|
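A usage note for the fused classifier (a sketch, assuming CUDA): the generated call() always samples the weights, i.e. the stochastic=True branch, so repeated calls on the same input give different scores; only the output shape and the 1/temp scaling (temp=0.05, hence the *20.0 kernel) are deterministic.
import torch
clf = StochasticClassifierNew(num_features=4, num_classes=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
s1, s2 = clf(x), clf(x)
print(s1.shape)                # torch.Size([4, 4, 4, 4])
print(torch.allclose(s1, s2))  # usually False: weights are resampled on every call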
Highway
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Highway(nn.Module):
"""Highway network"""
def __init__(self, input_size):
super(Highway, self).__init__()
self.fc1 = nn.Linear(input_size, input_size, bias=True)
self.fc2 = nn.Linear(input_size, input_size, bias=True)
def forward(self, x):
t = F.sigmoid(self.fc1(x))
return torch.mul(t, F.relu(self.fc2(x))) + torch.mul(1 - t, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf0, buf1,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
class HighwayNew(nn.Module):
"""Highway network"""
def __init__(self, input_size):
super(HighwayNew, self).__init__()
self.fc1 = nn.Linear(input_size, input_size, bias=True)
self.fc2 = nn.Linear(input_size, input_size, bias=True)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Kailianghu/Character-Aware-Neural-Language-Model
|
Highway
| false
| 8,386
|
[
"MIT"
] | 35
|
6bd72ce00a3ac9eb152ba006bdae8a6922e0ad35
|
https://github.com/Kailianghu/Character-Aware-Neural-Language-Model/tree/6bd72ce00a3ac9eb152ba006bdae8a6922e0ad35
|
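A possible parity check for the highway layer (a sketch, assuming CUDA), copying the reference weights into the fused module first:
import torch
ref = Highway(input_size=4).cuda()
fused = HighwayNew(input_size=4).cuda()
fused.load_state_dict(ref.state_dict())  # fc1/fc2 names match, so the copy is direct
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(ref(x), fused(x), atol=1e-6))  # expected True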
BCEDiceLossWithLogits
|
import torch
import torch.nn as nn
import torch.utils.data
def flatten_samples(input_):
"""
Flattens a tensor or a variable such that the channel axis is first and the sample axis
is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
    The input must be at least 2d.
"""
num_channels = input_.size(1)
permute_axes = list(range(input_.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
permuted = input_.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, -1)
return flattened
def dice_score(input_, target, invert=False, channelwise=True, eps=1e-07):
if channelwise:
input_ = flatten_samples(input_)
target = flatten_samples(target)
numerator = (input_ * target).sum(-1)
denominator = (input_ * input_).sum(-1) + (target * target).sum(-1)
channelwise_score = 2 * (numerator / denominator.clamp(min=eps))
if invert:
channelwise_score = 1.0 - channelwise_score
score = channelwise_score.sum()
else:
numerator = (input_ * target).sum()
denominator = (input_ * input_).sum() + (target * target).sum()
score = 2.0 * (numerator / denominator.clamp(min=eps))
if invert:
score = 1.0 - score
return score
class BCEDiceLossWithLogits(nn.Module):
def __init__(self, alpha=1.0, beta=1.0, channelwise=True, eps=1e-07):
super().__init__()
self.alpha = alpha
self.beta = beta
self.channelwise = channelwise
self.eps = eps
self.init_kwargs = {'alpha': alpha, 'beta': beta, 'channelwise':
channelwise, 'eps': self.eps}
def forward(self, input_, target):
loss_dice = dice_score(nn.functional.sigmoid(input_), target,
invert=True, channelwise=self.channelwise, eps=self.eps)
loss_bce = nn.functional.binary_cross_entropy_with_logits(input_,
target)
return self.alpha * loss_dice + self.beta * loss_bce
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp2 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp2 * tmp2
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
tl.store(out_ptr2 + x0, tmp17, xmask)
@triton.jit
def triton_per_fused_add_clamp_div_mul_rsub_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.load(in_ptr2 + r0, None)
tmp3 = tmp1 + tmp2
tmp4 = 1e-07
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp0 / tmp5
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp8
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mul_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp16 = tl.load(in_out_ptr0 + 0)
tmp17 = tl.broadcast_to(tmp16, [1])
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp18 = tmp17 * tmp1
tmp19 = 256.0
tmp20 = tmp15 / tmp19
tmp21 = tmp20 * tmp1
tmp22 = tmp18 + tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_div_mul_rsub_sum_1[grid(1)](buf0, buf1,
buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
buf5 = buf3
del buf3
triton_per_fused_add_binary_cross_entropy_with_logits_mul_2[grid(1)](
buf5, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf5,
def flatten_samples(input_):
"""
Flattens a tensor or a variable such that the channel axis is first and the sample axis
is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
    The input must be at least 2d.
"""
num_channels = input_.size(1)
permute_axes = list(range(input_.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
permuted = input_.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, -1)
return flattened
def dice_score(input_, target, invert=False, channelwise=True, eps=1e-07):
if channelwise:
input_ = flatten_samples(input_)
target = flatten_samples(target)
numerator = (input_ * target).sum(-1)
denominator = (input_ * input_).sum(-1) + (target * target).sum(-1)
channelwise_score = 2 * (numerator / denominator.clamp(min=eps))
if invert:
channelwise_score = 1.0 - channelwise_score
score = channelwise_score.sum()
else:
numerator = (input_ * target).sum()
denominator = (input_ * input_).sum() + (target * target).sum()
score = 2.0 * (numerator / denominator.clamp(min=eps))
if invert:
score = 1.0 - score
return score
class BCEDiceLossWithLogitsNew(nn.Module):
def __init__(self, alpha=1.0, beta=1.0, channelwise=True, eps=1e-07):
super().__init__()
self.alpha = alpha
self.beta = beta
self.channelwise = channelwise
self.eps = eps
self.init_kwargs = {'alpha': alpha, 'beta': beta, 'channelwise':
channelwise, 'eps': self.eps}
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
JonasHell/torch-em
|
BCEDiceLossWithLogits
| false
| 8,387
|
[
"MIT"
] | 13
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
https://github.com/JonasHell/torch-em/tree/2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
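A decomposition check for the combined loss above (a sketch, assuming CUDA): with the default alpha=beta=1, the fused result should equal the inverted channelwise Dice score plus the mean BCE-with-logits term.
import torch
pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
combined = BCEDiceLossWithLogitsNew()(pred, target)
dice = dice_score(torch.sigmoid(pred), target, invert=True, channelwise=True)
bce = torch.nn.functional.binary_cross_entropy_with_logits(pred, target)
print(torch.allclose(combined, dice + bce, atol=1e-5))  # expected True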